I have a Docker container running WireGuard (https://docs.linuxserver.io/images/docker-wireguard)
Config:
version: "2.1"
services:
wireguard:
image: lscr.io/linuxserver/wireguard
container_name: my-wireguard
cap_add:
- NET_ADMIN
- SYS_MODULE
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Moscow
- SERVERURL=193.168.227.155
- SERVERPORT=51820
- PEERS=1
- PEERDNS=1.1.1.1
- INTERNAL_SUBNET=10.13.13.0
- ALLOWEDIPS=0.0.0.0/0
volumes:
- /home/wireguard:/config
- /lib/modules:/lib/modules
ports:
- 51820:51820/udp
sysctls:
- net.ipv4.conf.all.src_valid_mark=1
restart: unless-stopped
I need to run BIRD for clients connected with WireGuard
Config:
log syslog all;
router id 10.13.13.0;
protocol kernel {
scan time 60;
ipv4 {
import none;
export none;
};
}
protocol device {
scan time 60;
}
protocol static {
ipv4;
include "subnet.txt";
include "ip.txt";
include "my.txt";
}
protocol bgp {
local 10.13.13.0 as 64999;
neighbor 10.13.13.2 as 64999;
passive off;
ipv4 {
import none;
export all;
next hop self;
};
}
protocol bgp {
local 10.13.13.0 as 64999;
neighbor 10.13.13.3 as 64999;
passive off;
ipv4 {
import none;
export all;
next hop self;
};
}
With this config BIRD cannot connect, I get "bgp1: Socket error: bind: Cannot assign requested address" errors
I understand that additional configuration is required for this to work, but I'm really unfamiliar with Docker networking
Related
I have a project, which consist of Go application + Nginx + Db(Postgres). All are building in docker containers.
It is my docker-compose.yml file:
version: "3"
services:
db:
image: postgres:10
environment:
- POSTGRES_PASSWORD=DatabasePassword
- POSTGRES_USER=egor
- POSTGRES_DB=postgres
expose:
- 5432
backend:
build: .
environment:
- POSTGRES_URL=postgres://egor:DatabasePassword@db:5432/postgres?sslmode=disable
- LISTEN_ADDRESS=:5432
depends_on:
- db
proxy:
image: nginx
volumes:
- type: bind
source: ./nginx.conf
target: /etc/nginx/nginx.conf
ports:
- 80:80
depends_on:
- backend
- db
it is my go application:
package main
import (
"database/sql"
"fmt"
"time"
_ "github.com/lib/pq"
"log"
"net/http"
"github.com/caarlos0/env"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
type config struct {
PostgresUri string `env:"POSTGRES_URL" envDefault:"postgres://root:pass#localhost:5432/postgres?sslmode=disable"`
ListenAddress string `env:"LISTEN_ADDRESS" envDefault:":7000"`
//PostgresHost string `env:"POSTGRES_HOST" envDefault:":l"`
//PostgresUser string `env:"POSTGRES_USER" envDefault:":root"`
//PostgresPassword string `env:"POSTGRES_PASSWD" envDefault:":qwerty"`
//PostgresName string `env:"POSTGRES_NAME" envDefault:":postgres"`
}
// Package-level shared state: the database handle and the Prometheus
// collectors, all registered/initialized in main and used by handler.
var (
// db is the shared database handle; assigned in main, read by handler.
db *sql.DB
// errorsCount counts handler failures, labeled by type
// ("missing" = absent 'q' query param, "db" = database error).
errorsCount = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "gocalc_errors_count",
Help: "Gocalc Errors Count Per Type",
},
[]string{"type"},
)
// requestsCount counts every request that reaches handler.
requestsCount = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "gocalc_requests_count",
Help: "Gocalc Requests Count",
})
)
// main registers the Prometheus collectors, loads configuration from the
// environment, connects to PostgreSQL and serves HTTP until a fatal error.
func main() {
	// MustRegister panics on duplicate registration, which would be a
	// programmer error here.
	prometheus.MustRegister(errorsCount)
	prometheus.MustRegister(requestsCount)

	// Load settings from the environment; on failure, report the error
	// and continue with the envDefault values baked into the struct.
	var cfg config
	if parseErr := env.Parse(&cfg); parseErr != nil {
		fmt.Printf("%+v\n", parseErr)
	}

	// Crude fixed delay to give the database container time to start.
	time.Sleep(time.Second)
	fmt.Println("Sleep over!")

	// sql.Open only validates the DSN; the Ping below performs the real
	// connectivity check. This assigns the package-level db handle.
	var err error
	db, err = sql.Open("postgres", cfg.PostgresUri)
	if err != nil {
		log.Fatalf("Can't connect to postgresql: %v", err)
	}
	defer db.Close()

	if err = db.Ping(); err != nil {
		log.Fatalf("Can't ping database: %v", err)
	}

	http.HandleFunc("/", handler)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(cfg.ListenAddress, nil))
}
// handler evaluates the arithmetic expression given in the "q" query
// parameter by asking PostgreSQL to cast it to numeric, and writes the
// result to the client. It increments the request counter on entry and
// the appropriate error counter on failure.
//
// SECURITY: q is interpolated directly into the SQL statement, so this
// endpoint executes arbitrary SQL from untrusted input. Evaluating
// expressions is the point of the service, but it must only ever run
// against a throwaway database with no data or privileges.
func handler(w http.ResponseWriter, r *http.Request) {
	requestsCount.Inc()

	keys, ok := r.URL.Query()["q"]
	if !ok || len(keys[0]) < 1 {
		errorsCount.WithLabelValues("missing").Inc()
		log.Println("Url Param 'q' is missing")
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}
	q := keys[0]
	log.Println("Got query: ", q)

	var result string
	sqlStatement := fmt.Sprintf("SELECT (%s)::numeric", q)
	row := db.QueryRow(sqlStatement)
	if err := row.Scan(&result); err != nil {
		// Printf, not Println: the original passed a %s verb to
		// Println, which printed the verb literally (go vet flags it).
		log.Printf("Error from db: %s", err)
		errorsCount.WithLabelValues("db").Inc()
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "query %s; result %s", q, result)
}
And my nginx configuration:
events{
worker_connections 1024;
}
http{
server {
listen 80;
server_name localhost;
location / {
proxy_pass http://backend:7000;
}
}
}
But when I try to open the page in a browser, I see an error page — 502 Bad Gateway (nginx).
It is my log:
2022/11/08 23:41:24 [error] 29#29: *1 connect() failed (111: Connection refused) while connecting to upstream, client: xxx.xx.x.x, server: localhost, request: "GET / HTTP/1.1", upstream: "http://xxx.xx.x.x:7000/", host: "0.0.0.0"
What is the problem? All services work correctly; only the nginx reverse proxy has an error.
I just put together a small project that represents your scenario. This is the repository structure:
webapp/
nginx/
Dockerfile
nginx.conf
web/
Dockerfile
main.go
docker-compose.yaml
The contents of each file are as follows.
nginx/nginx.conf
events{}
http {
server {
listen 80;
location / {
proxy_pass http://backend:7000;
}
}
}
More or less is your same file.
nginx/Dockerfile
FROM nginx
EXPOSE 80
COPY nginx.conf /etc/nginx/nginx.conf
Here, we specify instructions to build the nginx container. We expose only the port 80.
web/main.go
package main
import (
"fmt"
"net/http"
)
// main starts a minimal HTTP server on port 7000 that answers every
// request with a fixed greeting.
func main() {
	greet := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Hello, World!\n")
	}
	http.HandleFunc("/", greet)
	http.ListenAndServe(":7000", nil)
}
Simple HTTP server with a hard-coded reply. This HTTP server listens for requests on port 7000.
web/Dockerfile
FROM golang:1.12.7-alpine3.10 AS build
WORKDIR /go/src/app
COPY ./main.go ./main.go
RUN go build -o ./bin/gowebserver ./main.go
FROM alpine:latest
COPY --from=build /go/src/app/bin /go/bin
EXPOSE 7000
ENTRYPOINT go/bin/gowebserver
Here, we use the multi-stage build. In the first section we build the HTTP server while in the second one, we copy the executable on a leaner base image of Docker. We expose port 7000 of this container.
docker-compose.yaml
version: "3"
services:
backend:
build: "./web"
expose:
- "7000"
nginx:
build: "./nginx"
ports:
- "80:80"
depends_on:
- "backend"
Here, is the last part that connects all. We expose to the outside only the port 80. Internally, the backend service exposes port 7000 to be contacted by the nginx service.
To spin up everything, you've to run these two commands (in the root folder of the project):
docker-compose build
docker-compose up
To test this solution you've to use your internal IP address (in my case was something like 192.168.1.193) and navigate to the URL http://192.168.1.193/ which should give you a Hello, World! message.
Let me know if this solves your issue!
I have a dockerized back-end with golang gin server, postgresql and redis.
Everything starts correctly with this docker-compose.yaml file :
version: '3.9'
services:
postgresql:
image: 'postgres:13.1-alpine'
volumes:
- data:/var/lib/postgresql/data
env_file:
- ./env/postgre.env
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
timeout: 5s
retries: 5
ports:
- '5432:5432'
server:
build: ./server
ports:
- '8000:8000'
volumes:
- ./server:/app
depends_on:
- postgresql
redis:
image: "redis"
ports:
- "6379:6379"
volumes:
- $PWD/redis-data:/var/lib/redis
volumes:
data:
Than I initialize redis in main func :
func main() {
util.InitializeRedis()
(...)
// InitializeRedis func
// newPool builds a redis.Pool that dials the Redis service by its
// docker-compose service name. Inside the compose network Redis is not
// on localhost, so the address must be "redis:6379": the original bare
// ":6379" dialed the app's own container and was refused
// ("dial tcp :6379: connect: connection refused").
func newPool() *redis.Pool {
	return &redis.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		DialContext: func(context.Context) (redis.Conn, error) {
			return redis.Dial("tcp", "redis:6379")
		},
	}
}
// pool is the shared Redis connection pool; created by InitializeRedis
// and consumed by handlers via pool.Get.
var (
pool *redis.Pool
)
// InitializeRedis parses command-line flags and creates the shared Redis
// connection pool. It must be called before any handler calls pool.Get;
// note it does not dial Redis itself, so a bad address only surfaces on
// first use of a connection.
func InitializeRedis() {
flag.Parse()
pool = newPool()
}
It doesn't prompt any error, but I cannot get connection with pool.Get in another function :
// Handle "/redis" for test
router.GET("/redis", util.ServeHome)
// ServeHome func
func ServeHome(ctx *gin.Context){
conn := pool.Get()
defer conn.Close()
var p1 struct{
Title string `redis:"title" json:"title"`
Author string `redis:"author" json:"author"`
Body string `redis:"body" json:"body"`
}
p1.Title = "Example"
p1.Author = "Gary"
p1.Body = "Hello"
if _, err := conn.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil {
log.Fatalf("Error occured with redis HMSEET, %v", err) // Error in console is from here
return
}
(...)
And when I try to access /redis with Insomnia it shows: Error: Server returned nothing (no headers, no data) and in console logs : Error occured with redis HMSEET, dial tcp :6379: connect: connection refused
I couldn't find any article which solve this problem for me, so I do appreciate any help.
Since you're using docker-compose Redis won't be available on :6379, instead it will be available on the hostname redis.
I think you'll need to update your code to the following:
redis.Dial("tcp","redis:6379")
I want to containerize my web applications. Currently, I am using Apache to provide a couple of PHP apps.
Every app should be provided by their own container.
Nginx should be reachable on port 80/443. Depending on the sub-route, it should proxy to one of the containers.
For example:
www.url.de/hello1 --> hello1:80
www.url.de/hello2 --> hello2:80
docker-compose.yml:
version: '3'
services:
nginx:
image: nginx:latest
container_name: reverse_proxy
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
ports:
- "80:80"
- "443:443"
networks:
- app-network
depends_on:
- hello1
- hello2
hello1:
build: ./test1
image: hello1
container_name: hello1
expose:
- "80"
networks:
- app-network
hello2:
build: ./test2
image: hello2
container_name: hello2
expose:
- "80"
networks:
- app-network
networks:
app-network:
nginx.conf:
events {
}
http {
error_log /etc/nginx/error_log.log warn;
client_max_body_size 20m;
proxy_cache_path /etc/nginx/cache keys_zone=one:500m max_size=1000m;
server {
server_name wudio.de;
location / {
proxy_pass http://hello1:80;
}
location /hello1/ {
proxy_pass http://hello1:80;
rewrite ^/hello1(.*)$ $1 break;
}
location /hello2/ {
proxy_pass http://hello2:80;
rewrite ^/hello2(.*)$ $1 break;
}
}
}
If I run docker-compose up -d, only the container with the image webapp-test1 is online, and I can also reach it via curl localhost:8081.
Nginx is not running. If I remove the line in which I add nginx.conf to the volumes of Nginx, it's working.
What am I doing wrong?
Edit1:
http:// was missing. But proxying is still not working on sub-routes; only location / works. How do I get /hello1 running?
Note the proxy_pass statement. You have to mention the protocol in that statement. Also note how you can refer to the name of the service in your docker-compose.yml file (in this case hello1).
events {
}
http {
error_log /etc/nginx/error_log.log warn;
client_max_body_size 20m;
proxy_cache_path /etc/nginx/cache keys_zone=one:500m max_size=1000m;
server {
listen 80;
location / {
try_files $uri @proxy;
}
location @proxy {
proxy_pass http://hello1:80/;
}
}
}
Edit: Try this instead
events {
}
http {
error_log /etc/nginx/error_log.log warn;
client_max_body_size 20m;
proxy_cache_path /etc/nginx/cache keys_zone=one:500m max_size=1000m;
server {
listen 80;
location / {
try_files $uri @proxy;
}
location @proxy {
if ($request_uri ~* "^\/hello1(\/.*)$") {
set $url "http://hello1:80$1";
}
if ($request_uri ~* "^\/hello2(\/.*)$") {
set $url "http://hello2:80$1";
}
proxy_pass "$url";
}
}
}
I'm trying to make a monitoring stack with traefik, grafana, zabbix, gotify etc.
I've a domain name called domain.tld.
In my docker-compose, I've some services with different port (grafana for example), but I've also some services on the same port (gotify, zabbix).
I want to redirect my domain.tld with zabbix.domain.tld, grafana.domain.tld to each container with SSL.
It's works, but not exactly.
If I put in my address bar:
grafana.domain.tld -> 404 Error with SSL redirection
If I put in my address bar:
grafana.domain.tld:3000 -> It's ok
I think I'm a little lost (or completely?) among my many modifications.
The documentation alone hasn't been enough for me.
So, my docker-compose:
version: '3.5'
networks:
traefik_front:
external: true
services:
traefik:
image: traefik
command: --api --docker
restart: unless-stopped
ports:
- "80:80"
- "443:443"
- "8080:8080"
volumes:
- "${TRAEFIK_PATH}/traefik.toml:/etc/traefik/traefik.toml"
- "${TRAEFIK_PATH}/acme.json:/acme.json"
- /var/run/docker.sock:/var/run/docker.sock
labels:
- "traefik.frontend.rule=Host:traefik.${DOMAIN}"
- "traefik.port=8080"
- "traefik.enable=true"
- "traefik.backend=traefik"
- "traefik.docker.network=traefik_front"
#- "traefik.frontend.entryPoints=http,https"
networks:
- traefik_front
gotify:
image: gotify/server
container_name: gotify
volumes:
- "${GOTIFY_PATH}:/app/data"
env_file:
- env/.env_gotify
labels:
- "traefik.frontend.rule=Host:push.${DOMAIN}"
- "traefik.port=80"
- "traefik.enable=true"
- "traefik.backend=gotify"
- "traefik.docker.network=traefik_front"
networks:
- traefik_front
- default
grafana:
image: grafana/grafana
container_name: grafana
volumes:
- "${GF_PATH}:/var/lib/grafana"
env_file:
- env/.env_grafana
labels:
- "traefik.frontend.rule=Host:grafana.${DOMAIN}"
- "traefik.port=3000"
- "traefik.enable=true"
- "traefik.backend=grafana"
- "traefik.docker.network=traefik_front"
networks:
- traefik_front
- default
zabbix-server:
image: zabbix/zabbix-server-mysql:ubuntu-4.0-latest
volumes:
- "${ZABBIX_PATH}/alertscripts:/usr/lib/zabbix/alertscripts:ro"
- "${ZABBIX_PATH}/externalscripts:/usr/lib/zabbix/externalscripts:ro"
- "${ZABBIX_PATH}/modules:/var/lib/zabbix/modules:ro"
- "${ZABBIX_PATH}/enc:/var/lib/zabbix/enc:ro"
- "${ZABBIX_PATH}/ssh_keys:/var/lib/zabbix/ssh_keys:ro"
- "${ZABBIX_PATH}/mibs:/var/lib/zabbix/mibs:ro"
- "${ZABBIX_PATH}/snmptraps:/var/lib/zabbix/snmptraps:ro"
links:
- mysql-server:mysql-server
env_file:
- env/.env_zabbix_db_mysql
- env/.env_zabbix_srv
user: root
depends_on:
- mysql-server
- zabbix-snmptraps
labels:
- "traefik.backend=zabbix-server"
- "traefik.port=10051"
zabbix-web-apache-mysql:
image: zabbix/zabbix-web-apache-mysql:ubuntu-4.0-latest
links:
- mysql-server:mysql-server
- zabbix-server:zabbix-server
volumes:
- "${ZABBIX_PATH}/ssl/apache2:/etc/ssl/apache2:ro"
env_file:
- env/.env_zabbix_db_mysql
- env/.env_zabbix_web
user: root
depends_on:
- mysql-server
- zabbix-server
labels:
- "traefik.frontend.rule=Host:zabbix.${DOMAIN}"
- "traefik.port=80"
- "traefik.enable=true"
- "traefik.backend=zabbix-web"
- "traefik.docker.network=traefik_front"
networks:
- traefik_front
- default
zabbix-agent:
image: zabbix/zabbix-agent:ubuntu-4.0-latest
ports:
- "10050:10050"
volumes:
- "${ZABBIX_PATH}/zabbix_agentd.d:/etc/zabbix/zabbix_agentd.d:ro"
- "${ZABBIX_PATH}/modules:/var/lib/zabbix/modules:ro"
- "${ZABBIX_PATH}/enc:/var/lib/zabbix/enc:ro"
- "${ZABBIX_PATH}/ssh_keys:/var/lib/zabbix/ssh_keys:ro"
links:
- zabbix-server:zabbix-server
env_file:
- env/.env_zabbix_agent
user: root
networks:
- default
zabbix-snmptraps:
image: zabbix/zabbix-snmptraps:ubuntu-4.0-latest
ports:
- "162:162/udp"
volumes:
- "${ZABBIX_PATH}/snmptraps:/var/lib/zabbix/snmptraps:rw"
user: root
networks:
- default
mysql-server:
image: mysql:5.7
command: [mysqld, --character-set-server=utf8, --collation-server=utf8_bin]
volumes:
- /var/lib/mysql:/var/lib/mysql:rw
env_file:
- env/.env_zabbix_db_mysql
labels:
- "traefik.enable=false"
user: root
networks:
- default
And my traefik.toml:
# WEBUI
[web]
entryPoint = "dashboard"
dashboard = true
address = ":8080"
usersFile = "/etc/docker/traefik/.htpasswd"
logLevel = "ERROR"
# Force HTTPS
defaultEntryPoints = ["http", "https"]
[entryPoints]
[entryPoints.dashboard]
address = ":8080"
[entryPoints.http]
address = ":80"
[entryPoints.http.redirect]
entryPoint = "https"
[entryPoints.https]
address = ":443"
[entryPoints.https.tls]
[docker]
endpoint = "unix:///var/run/docker.sock"
watch = true
exposedbydefault = false
domain = "domain.tld"
network = "traefik_front"
# Let's Encrypt
[acme]
email = "mail@mail.fr"
storage = "acme.json"
entryPoint = "https"
onHostRule = true
onDemand = false
[acme.httpChallenge]
entryPoint = "http"
OnHostRule = true
[[acme.domains]]
main = "domain.tld"
[[acme.domains]]
main = "domain.tld"
[[acme.domains]]
main = "domain.tld"
[[acme.domains]]
main = "domain.tld"
I've done something similar, and it would look this on your setup
docker-compose.yml
service:
traefik:
labels:
- "traefik.port=8080"
- "traefik.enable=true"
- "traefik.backend=traefik"
- "traefik.docker.network=traefik_front"
- "traefik.frontend.rule=Host:traefik.${DOMAIN}"
- "traefik.webservice.frontend.entryPoints=https"
zabbix-web-apache-mysql:
labels:
- "traefik.port=80"
- "traefik.enable=true"
- "traefik.backend=zabbix-web"
- "traefik.passHostHeader=true"
- "traefik.docker.network=traefik_front"
- "traefik.frontend.rule=Host:zabbix.${DOMAIN}"
grafana:
labels:
- "traefik.port=3000"
- "traefik.enable=true"
- "traefik.backend=grafana"
- "traefik.passHostHeader=true"
- "traefik.docker.network=traefik_front"
- "traefik.frontend.rule=Host:grafana.${DOMAIN}"
and the way my traefik.toml is configured
InsecureSkipVerify = true ## This is optional
## Force HTTPS
[entryPoints]
[entryPoints.http]
passHostHeader = true
address = ":80"
[entryPoints.http.forwardedHeaders]
insecure = true
[entryPoints.http.proxyProtocol]
insecure = true
## This seems to be an absolute requirement for redirect
## ...but it redirects every request to https
[entryPoints.http.redirect]
entryPoint = "https"
[entryPoints.traefik]
address = ":8080"
[entryPoints.traefik.auth.basic]
# the "user" password is the MD5 encrpytion of the word "pass"
users = ["user:$apr1$.LWU4fEi$4YipxeuXs5T0xulH3S7Kb."]
[entryPoints.https]
passHostHeader = true
address = ":443"
[entryPoints.https.tls] ## This seems to be an absolute requirement
[entryPoints.https.forwardedHeaders]
insecure = true
[entryPoints.https.proxyProtocol]
insecure = true
I'm running into a problem with running browsersync via Gulp inside a docker container. I'm using Laradock.
I tried to extrapolate from this answer: Browsersync within a Docker container, but I'm only able to get the UI to show on port 3001.
It's unclear to me what I should have for proxy. I've tried many combinations such as:
function browserSync(done) {
browsersync.init({
proxy: 'workspace:22', notify:false, open:false
});
done();
}
// -----------------
function browserSync(done) {
browsersync.init({
proxy: 'workspace:3000', notify:false, open:false
});
done();
}
// ---------------
function browserSync(done) {
browsersync.init({
proxy: 'localhost', notify:false, open:false
});
done();
}
I've added 3000 and 3001 to docker-compose.yml under my workspace service
### Workspace Utilities ##################################
workspace:
build:
context: ./workspace
args:
- LARADOCK_PHP_VERSION=${PHP_VERSION}
- LARADOCK_PHALCON_VERSION=${PHALCON_VERSION}
- INSTALL_SUBVERSION=${WORKSPACE_INSTALL_SUBVERSION}
- INSTALL_XDEBUG=${WORKSPACE_INSTALL_XDEBUG}
- INSTALL_PHPDBG=${WORKSPACE_INSTALL_PHPDBG}
- INSTALL_BLACKFIRE=${INSTALL_BLACKFIRE}
- INSTALL_SSH2=${WORKSPACE_INSTALL_SSH2}
- INSTALL_GMP=${WORKSPACE_INSTALL_GMP}
- INSTALL_SOAP=${WORKSPACE_INSTALL_SOAP}
- INSTALL_XSL=${WORKSPACE_INSTALL_XSL}
- INSTALL_LDAP=${WORKSPACE_INSTALL_LDAP}
- INSTALL_IMAP=${WORKSPACE_INSTALL_IMAP}
- INSTALL_MONGO=${WORKSPACE_INSTALL_MONGO}
- INSTALL_AMQP=${WORKSPACE_INSTALL_AMQP}
- INSTALL_PHPREDIS=${WORKSPACE_INSTALL_PHPREDIS}
- INSTALL_MSSQL=${WORKSPACE_INSTALL_MSSQL}
- INSTALL_NODE=${WORKSPACE_INSTALL_NODE}
- NPM_REGISTRY=${WORKSPACE_NPM_REGISTRY}
- INSTALL_YARN=${WORKSPACE_INSTALL_YARN}
- INSTALL_NPM_GULP=${WORKSPACE_INSTALL_NPM_GULP}
- INSTALL_NPM_BOWER=${WORKSPACE_INSTALL_NPM_BOWER}
- INSTALL_NPM_VUE_CLI=${WORKSPACE_INSTALL_NPM_VUE_CLI}
- INSTALL_NPM_ANGULAR_CLI=${WORKSPACE_INSTALL_NPM_ANGULAR_CLI}
- INSTALL_DRUSH=${WORKSPACE_INSTALL_DRUSH}
- INSTALL_WP_CLI=${WORKSPACE_INSTALL_WP_CLI}
- INSTALL_DRUPAL_CONSOLE=${WORKSPACE_INSTALL_DRUPAL_CONSOLE}
- INSTALL_AEROSPIKE=${WORKSPACE_INSTALL_AEROSPIKE}
- INSTALL_V8JS=${WORKSPACE_INSTALL_V8JS}
- COMPOSER_GLOBAL_INSTALL=${WORKSPACE_COMPOSER_GLOBAL_INSTALL}
- COMPOSER_AUTH=${WORKSPACE_COMPOSER_AUTH}
- COMPOSER_REPO_PACKAGIST=${WORKSPACE_COMPOSER_REPO_PACKAGIST}
- INSTALL_WORKSPACE_SSH=${WORKSPACE_INSTALL_WORKSPACE_SSH}
- INSTALL_LARAVEL_ENVOY=${WORKSPACE_INSTALL_LARAVEL_ENVOY}
- INSTALL_LARAVEL_INSTALLER=${WORKSPACE_INSTALL_LARAVEL_INSTALLER}
- INSTALL_DEPLOYER=${WORKSPACE_INSTALL_DEPLOYER}
- INSTALL_PRESTISSIMO=${WORKSPACE_INSTALL_PRESTISSIMO}
- INSTALL_LINUXBREW=${WORKSPACE_INSTALL_LINUXBREW}
- INSTALL_MC=${WORKSPACE_INSTALL_MC}
- INSTALL_SYMFONY=${WORKSPACE_INSTALL_SYMFONY}
- INSTALL_PYTHON=${WORKSPACE_INSTALL_PYTHON}
- INSTALL_IMAGE_OPTIMIZERS=${WORKSPACE_INSTALL_IMAGE_OPTIMIZERS}
- INSTALL_IMAGEMAGICK=${WORKSPACE_INSTALL_IMAGEMAGICK}
- INSTALL_TERRAFORM=${WORKSPACE_INSTALL_TERRAFORM}
- INSTALL_DUSK_DEPS=${WORKSPACE_INSTALL_DUSK_DEPS}
- INSTALL_PG_CLIENT=${WORKSPACE_INSTALL_PG_CLIENT}
- INSTALL_PHALCON=${WORKSPACE_INSTALL_PHALCON}
- INSTALL_SWOOLE=${WORKSPACE_INSTALL_SWOOLE}
- INSTALL_LIBPNG=${WORKSPACE_INSTALL_LIBPNG}
- INSTALL_IONCUBE=${WORKSPACE_INSTALL_IONCUBE}
- INSTALL_MYSQL_CLIENT=${WORKSPACE_INSTALL_MYSQL_CLIENT}
- PUID=${WORKSPACE_PUID}
- PGID=${WORKSPACE_PGID}
- CHROME_DRIVER_VERSION=${WORKSPACE_CHROME_DRIVER_VERSION}
- NODE_VERSION=${WORKSPACE_NODE_VERSION}
- YARN_VERSION=${WORKSPACE_YARN_VERSION}
- DRUSH_VERSION=${WORKSPACE_DRUSH_VERSION}
- TZ=${WORKSPACE_TIMEZONE}
- BLACKFIRE_CLIENT_ID=${BLACKFIRE_CLIENT_ID}
- BLACKFIRE_CLIENT_TOKEN=${BLACKFIRE_CLIENT_TOKEN}
- INSTALL_POWERLINE=${WORKSPACE_INSTALL_POWERLINE}
- INSTALL_FFMPEG=${WORKSPACE_INSTALL_FFMPEG}
volumes:
- ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG}
extra_hosts:
- "dockerhost:${DOCKER_HOST_IP}"
ports:
- "${WORKSPACE_SSH_PORT}:22"
- "3000:3000"
- "3001:3001"
tty: true
environment:
- PHP_IDE_CONFIG=${PHP_IDE_CONFIG}
- DOCKER_HOST=tcp://docker-in-docker:2375
networks:
- frontend
- backend
links:
- docker-in-docker
I'm running apache on port 80, so my app can be seen at http://localhost
I'm able to access the UI at localhost:3001, but can't access localhost:3000.