I set up a Rasa X server in server mode using docker-compose (https://rasa.com/docs/rasa-x/installation-and-setup/install/docker-compose). I want to get the intent and entities for a message, so I am hitting this URL from Postman with some text: https://2286950d1621.jp.ngrok.io/model/parse. But I'm getting an HTML response instead of a JSON response.
My docker-compose.yml file:
version: "3.4"
x-database-credentials: &database-credentials
DB_HOST: "db"
DB_PORT: "5432"
DB_USER: "${DB_USER:-admin}"
DB_PASSWORD: "${DB_PASSWORD}"
DB_LOGIN_DB: "${DB_LOGIN_DB:-rasa}"
x-rabbitmq-credentials: &rabbitmq-credentials
RABBITMQ_HOST: "rabbit"
RABBITMQ_USERNAME: "user"
RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD}
x-redis-credentials: &redis-credentials
REDIS_HOST: "redis"
REDIS_PORT: "6379"
REDIS_PASSWORD: ${REDIS_PASSWORD}
REDIS_DB: "1"
x-duckling-credentials: &duckling-credentials
RASA_DUCKLING_HTTP_URL: "http://duckling:8000"
x-rasax-credentials: &rasax-credentials
RASA_X_HOST: "http://rasa-x:5002"
RASA_X_USERNAME: ${RASA_X_USERNAME:-admin}
RASA_X_PASSWORD: ${RASA_X_PASSWORD:-}
RASA_X_TOKEN: ${RASA_X_TOKEN}
JWT_SECRET: ${JWT_SECRET}
RASA_USER_APP: "http://app:5055"
RASA_PRODUCTION_HOST: "http://rasa-production:5005"
RASA_WORKER_HOST: "http://rasa-worker:5005"
RASA_TOKEN: ${RASA_TOKEN}
x-rasa-credentials: &rasa-credentials
<<: *rabbitmq-credentials
<<: *rasax-credentials
<<: *database-credentials
<<: *redis-credentials
<<: *duckling-credentials
RASA_TOKEN: ${RASA_TOKEN}
RASA_MODEL_PULL_INTERVAL: 10
RABBITMQ_QUEUE: "rasa_production_events"
RASA_TELEMETRY_ENABLED: ${RASA_TELEMETRY_ENABLED:-true}
x-rasa-services: &default-rasa-service
restart: always
image: "rasa/rasa:${RASA_VERSION}-full"
volumes:
- ./.config:/.config
expose:
- "5005"
command: >
x
--no-prompt
--production
--config-endpoint http://rasa-x:5002/api/config?token=${RASA_X_TOKEN}
--port 5005
--jwt-method HS256
--jwt-secret ${JWT_SECRET}
--auth-token '${RASA_TOKEN}'
--enable-api
--cors "*"
depends_on:
- rasa-x
- rabbit
- redis
services:
rasa-x:
restart: always
image: "rasa/rasa-x:${RASA_X_VERSION}"
expose:
- "5002"
volumes:
- ./models:/app/models
- ./environments.yml:/app/environments.yml
- ./credentials.yml:/app/credentials.yml
- ./endpoints.yml:/app/endpoints.yml
- ./logs:/logs
- ./auth:/app/auth
environment:
<<: *database-credentials
<<: *rasa-credentials
SELF_PORT: "5002"
DB_DATABASE: "${DB_DATABASE:-rasa}"
RASA_MODEL_DIR: "/app/models"
PASSWORD_SALT: ${PASSWORD_SALT}
RABBITMQ_QUEUE: "rasa_production_events"
RASA_X_USER_ANALYTICS: "0"
SANIC_RESPONSE_TIMEOUT: "3600"
RUN_DATABASE_MIGRATION_AS_SEPARATE_SERVICE: "true"
depends_on:
- db
db-migration:
command: ["python", "-m", "rasax.community.services.db_migration_service"]
restart: always
image: "rasa/rasa-x:${RASA_X_VERSION}"
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:8000/health || kill 1"]
interval: 5s
timeout: 1s
retries: 3
start_period: 2s
expose:
- "8000"
environment:
<<: *database-credentials
RUN_DATABASE_MIGRATION_AS_SEPARATE_SERVICE: "true"
depends_on:
- db
rasa-production:
<<: *default-rasa-service
environment:
<<: *rasa-credentials
RASA_ENVIRONMENT: "production"
DB_DATABASE: "tracker"
MPLCONFIGDIR: "/tmp/.matplotlib"
RASA_MODEL_SERVER: "http://rasa-x:5002/api/projects/default/models/tags/production"
rasa-worker:
<<: *default-rasa-service
environment:
<<: *rasa-credentials
RASA_ENVIRONMENT: "worker"
DB_DATABASE: "worker_tracker"
MPLCONFIGDIR: "/tmp/.matplotlib"
RASA_MODEL_SERVER: "http://rasa-x:5002/api/projects/default/models/tags/production"
app:
restart: always
image: "rasa/rasa-x-demo:${RASA_X_DEMO_VERSION}"
expose:
- "5055"
depends_on:
- rasa-production
db:
restart: always
image: "bitnami/postgresql:11.9.0"
expose:
- "5432"
environment:
POSTGRESQL_USERNAME: "${DB_USER:-admin}"
POSTGRESQL_PASSWORD: "${DB_PASSWORD}"
POSTGRESQL_DATABASE: "${DB_DATABASE:-rasa}"
volumes:
- ./db:/bitnami/postgresql
rabbit:
restart: always
image: "bitnami/rabbitmq:3.8.9"
environment:
RABBITMQ_HOST: "rabbit"
RABBITMQ_USERNAME: "user"
RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD}
RABBITMQ_DISK_FREE_LIMIT: "{mem_relative, 0.1}"
expose:
- "5672"
duckling:
restart: always
image: "rasa/duckling:0.1.6.4"
expose:
- "8000"
command: ["duckling-example-exe", "--no-access-log", "--no-error-log"]
nginx:
restart: always
image: "rasa/nginx:${RASA_X_VERSION}"
ports:
- "80:8080"
- "443:8443"
volumes:
- ./certs:/opt/bitnami/certs
- ./terms:/opt/bitnami/nginx/conf/bitnami/terms
depends_on:
- rasa-x
- rasa-production
- app
redis:
restart: always
image: "bitnami/redis:6.0.8"
environment:
REDIS_PASSWORD: ${REDIS_PASSWORD}
expose:
- "6379"
If you want to hit the Rasa Open Source server specifically, add /core to the endpoint, so here: https://2286950d1621.jp.ngrok.io/core/model/parse
The /core prefix tells the nginx proxy which service to route your request to. If you want to hit the Rasa X API instead, add /api.
What you are seeing currently is the Rasa X UI response, which comes from the default container (and is why you can log into the UI on the base URL directly).
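For example, a parse request against the Rasa Open Source container through the proxy might look like the sketch below. The ngrok host is the one from the question; the token query parameter is an assumption based on the --auth-token '${RASA_TOKEN}' flag in the compose file, so substitute your own value.

# Parse a message via the Rasa Open Source container (note the /core prefix).
# <RASA_TOKEN> is a placeholder for the token set in your .env file.
curl -s -X POST \
  "https://2286950d1621.jp.ngrok.io/core/model/parse?token=<RASA_TOKEN>" \
  -H "Content-Type: application/json" \
  -d '{"text": "book a table for two"}'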
Related
I have the following docker-compose file, but I can't see my data in pgAdmin from the browser. I don't understand the issue: if I send data from Postman, it gets saved and shows up in the pgAdmin app on my local machine, but when I visit localhost:5050/browser/, that table does not appear there.
version: '3.8'  # 'version: 1' is not valid for a compose file with a top-level services key
services:
postgres:
container_name: postgres_container
image: 'postgres:latest'
environment:
POSTGRES_USER: ${POSTGRES_USERNAME}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DATABASE}
ports:
- 5432:5432
networks:
app_network:
ipv4_address: 172.26.0.11
restart: unless-stopped
healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U postgres']
interval: 5s
timeout: 5s
retries: 5
volumes:
- postgres:/data/postgres
pgadmin:
container_name: pgadmin_container
image: dpage/pgadmin4
depends_on:
- postgres
volumes:
- pgadmin:/var/lib/pgadmin
environment:
PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL:-'email'}
PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD:-'password'}
PGADMIN_CONFIG_SERVER_MODE: 'False'
ports:
- '${PGADMIN_PORT:-5050}:80'
networks:
- app_network
restart: unless-stopped
networks:
  app_network:
    external: true  # external networks are created outside this file, so no driver is set here
volumes:
postgres:
pgadmin:
(Screenshot: the tables visible in the pgAdmin app on my machine.)
(Screenshot: the tables visible at localhost:5050/browser/.)
I have correctly set up the server connection in the browser pgAdmin.
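A quick sanity check on the connection target (an assumption on my part): inside the Docker network the database resolves by its compose service name, not by localhost, so the browser pgAdmin's server connection should point at the postgres service. A throwaway container joined to the same external network can confirm reachability:

# Hypothetical connectivity check: join the external app_network and probe the
# postgres service by name with pg_isready from the postgres image.
docker run --rm --network app_network postgres:latest \
  pg_isready -h postgres -p 5432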
I created a secret called "databasePassword" using the command below:
echo 123456 | docker secret create databasePassword -
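Listing the secrets confirms that it was stored; note that docker secret inspect only ever shows metadata, never the secret's value:

# List the secrets known to the swarm, then inspect the new one (metadata only).
docker secret ls
docker secret inspect databasePassword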
Below is my yml file creating MySQL and phpMyAdmin, where I'm trying to use this secret:
version: '3.1'
services:
db:
image: mysql
command: --default-authentication-plugin=mysql_native_password
restart: unless-stopped
container_name: db-mysql
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: 'false'
MYSQL_ROOT_PASSWORD: /run/secrets/databasePassword
ports:
- 3306:3306
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
timeout: 20s
retries: 10
secrets:
- databasePassword
beyond-phpmyadmin:
image: phpmyadmin
restart: unless-stopped
container_name: beyond-phpmyadmin
environment:
PMA_HOST: db-mysql
PMA_PORT: 3306
PMA_ARBITRARY: 1
links:
- db
ports:
- 8081:80
But the password is not being set to 123456; it is set to the literal string "/run/secrets/databasePassword". I tried docker stack deploy as well, but that didn't work either.
Following some web research, I tried declaring the secrets at the end of the file, like below, but that didn't work either.
version: '3.1'
services:
db:
image: mysql
command: --default-authentication-plugin=mysql_native_password
restart: unless-stopped
container_name: db-mysql
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: 'false'
MYSQL_ROOT_PASSWORD: /run/secrets/databasePassword
ports:
- 3306:3306
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
timeout: 20s
retries: 10
secrets:
- databasePassword
beyond-phpmyadmin:
image: phpmyadmin
restart: unless-stopped
container_name: beyond-phpmyadmin
environment:
PMA_HOST: db-mysql
PMA_PORT: 3306
PMA_ARBITRARY: 1
links:
- db
ports:
- 8081:80
secrets:
databasePassword:
external: true
Docker cannot know that /run/secrets/databasePassword is not a literal value for the MYSQL_ROOT_PASSWORD variable but a path to a file you would like the secret read from. That's not how secrets work: they are simply made available as a /run/secrets/<secret-name> file inside the container, and to use one, your container has to read it from that file.
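You can see this for yourself once the container is up (using the container name from the compose file above; under docker stack deploy the container name is generated, so adjust accordingly):

# The secret is mounted as a plain file inside the container; this prints 123456.
docker exec db-mysql cat /run/secrets/databasePassword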
Fortunately for you, the mysql image knows how to do this. Simply use MYSQL_ROOT_PASSWORD_FILE instead of MYSQL_ROOT_PASSWORD:
services:
db:
image: mysql
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: 'false'
MYSQL_ROOT_PASSWORD_FILE: /run/secrets/databasePassword
secrets:
- databasePassword
...
secrets:
databasePassword:
external: true
See "Docker Secrets" in the mysql image documentation.
I have the following setup as a reverse proxy on my server:
version: '3.7'
services:
nginx-proxy:
image: jwilder/nginx-proxy
container_name: nginx-proxy
ports:
- "80:80"
- "443:443"
volumes:
- html:/usr/share/nginx/html
- dhparam:/etc/nginx/dhparam
- vhost:/etc/nginx/vhost.d
- certs:/etc/nginx/certs:ro
- /var/run/docker.sock:/tmp/docker.sock:ro
- ./client_max_upload_size.conf:/etc/nginx/conf.d/client_max_upload_size.conf
- ./www.domain.com:/etc/nginx/vhost.d/www.domain.com
labels:
- "com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy"
restart: always
networks:
- net
letsencrypt:
image: jrcs/letsencrypt-nginx-proxy-companion
container_name: lets-encrypt-proxy-companion
depends_on:
- "nginx-proxy"
volumes:
- certs:/etc/nginx/certs:rw
- vhost:/etc/nginx/vhost.d
- html:/usr/share/nginx/html
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
      DEFAULT_EMAIL: mail@gmail.com
restart: always
networks:
- net
volumes:
certs:
html:
vhost:
dhparam:
networks:
net:
external: true
This setup works well: I am able to deploy multiple containers and everything runs fine.
I'm now trying to create an OpenProject container on this server, and I want it to sit behind the "net" reverse-proxy network.
I have tried removing the proxy service from the OpenProject compose file, and it didn't help.
version: "3.7"
networks:
net:
external: true
backend:
volumes:
pgdata:
opdata:
x-op-restart-policy: &restart_policy
restart: unless-stopped
x-op-image: &image
image: openproject/community:${TAG:-12}
x-op-app: &app
<<: *image
<<: *restart_policy
environment:
RAILS_CACHE_STORE: "memcache"
OPENPROJECT_CACHE__MEMCACHE__SERVER: "cache:11211"
OPENPROJECT_RAILS__RELATIVE__URL__ROOT: "${OPENPROJECT_RAILS__RELATIVE__URL__ROOT:-}"
    DATABASE_URL: "${DATABASE_URL:-postgres://postgres:p4ssw0rd@db/openproject?pool=20&encoding=unicode&reconnect=true}"
RAILS_MIN_THREADS: 4
RAILS_MAX_THREADS: 16
# set to true to enable the email receiving feature. See ./docker/cron for more options
IMAP_ENABLED: "${IMAP_ENABLED:-false}"
volumes:
- "${OPDATA:-opdata}:/var/openproject/assets"
services:
db:
image: postgres:13
<<: *restart_policy
stop_grace_period: "3s"
volumes:
- "${PGDATA:-pgdata}:/var/lib/postgresql/data"
environment:
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-p4ssw0rd}
POSTGRES_DB: openproject
networks:
- backend
cache:
image: memcached
<<: *restart_policy
networks:
- backend
proxy:
<<: *image
<<: *restart_policy
command: "./docker/prod/proxy"
ports:
- "${PORT:-8080}:80"
environment:
APP_HOST: web
OPENPROJECT_RAILS__RELATIVE__URL__ROOT: "${OPENPROJECT_RAILS__RELATIVE__URL__ROOT:-}"
VIRTUAL_HOST: ${SITE_DOMAIN},www.${SITE_DOMAIN}
LETSENCRYPT_HOST: ${SITE_DOMAIN},www.${SITE_DOMAIN}
depends_on:
- web
networks:
- net
web:
<<: *app
command: "./docker/prod/web"
networks:
- net
- backend
depends_on:
- db
- cache
- seeder
labels:
- autoheal=true
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080${OPENPROJECT_RAILS__RELATIVE__URL__ROOT:-}/health_checks/default"]
interval: 10s
timeout: 3s
retries: 3
start_period: 30s
autoheal:
image: willfarrell/autoheal:1.2.0
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
environment:
AUTOHEAL_CONTAINER_LABEL: autoheal
worker:
<<: *app
command: "./docker/prod/worker"
networks:
- backend
depends_on:
- db
- cache
- seeder
cron:
<<: *app
command: "./docker/prod/cron"
networks:
- backend
depends_on:
- db
- cache
- seeder
seeder:
<<: *app
command: "./docker/prod/seeder"
restart: on-failure
networks:
- backend
I have tried defining the network in the compose file, but it doesn't work. Any suggestions?
I solved it by removing the ports definition from the proxy service in the OpenProject compose file, so nginx-proxy reaches it over the shared net network instead of a published host port. I also increased the max execution time in the reverse proxy.
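For the timeout part, a common approach with jwilder/nginx-proxy is a per-vhost configuration file (a sketch, assuming proxy timeouts are what "max execution time" refers to); the reverse-proxy compose file above already bind-mounts ./www.domain.com into /etc/nginx/vhost.d/:

# Write proxy timeout directives into the per-vhost file mounted at
# /etc/nginx/vhost.d/www.domain.com, then restart the proxy to apply them.
cat > ./www.domain.com <<'EOF'
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
EOF
docker restart nginx-proxy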
I feel like this is a highly stupid question, but I just can't log into my pgAdmin.
My compose.yml file is:
services:
back:
container_name: blog-django
build: ./blog-master
command: gunicorn blog.wsgi:application --bind 0.0.0.0:8000
expose:
- 8000
links:
- db
volumes:
- blog-django:/usr/src/app/
- blog-static:/usr/src/app/static
- blog-media:/usr/src/app/media
env_file: ./.env
depends_on:
db:
condition: service_healthy
nginx:
container_name: blog-nginx
build: ./nginx/
ports:
- "1337:80"
volumes:
- blog-static:/usr/src/app/static
- blog-media:/usr/src/app/media
links:
- back
depends_on:
- back
db:
container_name: db
image: postgres:14
restart: always
expose:
- "5432"
environment:
POSTGRES_DB: docker
POSTGRES_USER: docker
POSTGRES_PASSWORD: docker
ports:
- "5432:5432"
volumes:
- pgdata:/var/lib/postgresql/data/
healthcheck:
test: ["CMD-SHELL", "pg_isready -U docker"]
interval: 5s
timeout: 5s
retries: 5
pgadmin:
image: dpage/pgadmin4
container_name: pgadmin
restart: always
ports:
- "5050:80"
environment:
      PGADMIN_DEFAULT_EMAIL: admin@admin.com
PGADMIN_DEFAULT_PASSWORD: root
volumes:
- pgadmin-data:/var/lib/pgadmin
depends_on:
- db
links:
- db
mailhog:
container_name: mailhog
image: mailhog/mailhog
#logging:
# driver: 'none' # disable saving logs
expose:
- 1025
ports:
- 1025:1025 # smtp server
- 8025:8025 # web ui
volumes:
blog-django:
blog-static:
blog-media:
pgdata:
pgadmin-data:
and my .env file is
DEBUG=1
SECRET_KEY='a key'
DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]
SQL_ENGINE=django.db.backends.postgresql
SQL_DATABASE=docker
SQL_USER=docker
SQL_PASSWORD=docker
SQL_HOST=db
SQL_PORT=5432
DATABASE=postgres
With any username/email and password it keeps saying "wrong email/password": if I log in with "admin@admin.com" the error is "Incorrect username or password", and with a plain username it says "email/username is not valid".
I tried using the admin@mailhog.local email so I could do a password recovery, but again: "Incorrect username or password".
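One thing worth ruling out (an assumption, not confirmed by the question): the PGADMIN_DEFAULT_EMAIL and PGADMIN_DEFAULT_PASSWORD values are only applied when the pgadmin-data volume is first initialized, so credentials changed after the first run are silently ignored. Recreating the volume forces pgAdmin to pick up the current values:

# WARNING: this wipes saved pgAdmin settings. The volume name is prefixed with
# the compose project name; check it with `docker volume ls` first.
docker compose down
docker volume rm <project>_pgadmin-data
docker compose up -d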
I have the following docker-compose.yml:
#######################################
# Kong database
#######################################
kong-database:
image: postgres:9.6
container_name: kong-database
environment:
POSTGRES_DB: kong
POSTGRES_USER: kong
POSTGRES_PASSWORD: kong
DB_PASSWORD: kong
healthcheck:
test: ["CMD", "pg_isready", "-U", "kong"]
interval: 30s
timeout: 30s
retries: 3
restart: on-failure
deploy:
restart_policy:
condition: on-failure
stdin_open: true
tty: true
networks:
- kong-net
ports:
- "5432:5432"
volumes:
- kong_data:/var/lib/postgresql/data
konga:
image: pantsel/konga:next
container_name: konga
restart: always
networks:
- kong-net
environment:
DB_ADAPTER: postgres
DB_HOST: kong-database
DB_PORT: 5432
DB_USER: kong
DB_PASSWORD: kong
DB_DATABASE: konga_db
POSTGRES_DB: kong
POSTGRES_USER: kong
POSTGRES_PASSWORD: kong
TOKEN_SECRET: km1GUr4RkcQD7DewhJPNXrCuZwcKmqjb
NODE_ENV: production
NODE_TLS_REJECT_UNAUTHORIZED: 0
depends_on:
- kong-database
- kong
ports:
- "1337:1337"
I want a database called konga_db to be created when the Konga service runs. How can I handle this in docker-compose?
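Two sketches of how this is commonly handled with the postgres image (assumptions on my part, not taken from an accepted answer): either create the database once by hand, or let the image's init mechanism do it.

# One-off: create the database inside the running kong-database container.
docker exec -it kong-database psql -U kong -c 'CREATE DATABASE konga_db;'

# Declarative alternative: the postgres image executes scripts from
# /docker-entrypoint-initdb.d, but only when the kong_data volume is first
# initialized. Mount a directory into the kong-database service, e.g.
#   - ./initdb:/docker-entrypoint-initdb.d
# and drop the statement into it:
mkdir -p ./initdb
echo 'CREATE DATABASE konga_db;' > ./initdb/create-konga-db.sql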