I have a dockerized back-end with a Golang Gin server, PostgreSQL and Redis.
Everything starts correctly with this docker-compose.yaml file:
version: '3.9'
services:
  postgresql:
    image: 'postgres:13.1-alpine'
    volumes:
      - data:/var/lib/postgresql/data
    env_file:
      - ./env/postgre.env
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
    ports:
      - '5432:5432'
  server:
    build: ./server
    ports:
      - '8000:8000'
    volumes:
      - ./server:/app
    depends_on:
      - postgresql
  redis:
    image: "redis"
    ports:
      - "6379:6379"
    volumes:
      - $PWD/redis-data:/var/lib/redis
volumes:
  data:
Then I initialize Redis in the main func:
func main() {
    util.InitializeRedis()
    (...)

// InitializeRedis func
func newPool() *redis.Pool {
    return &redis.Pool{
        MaxIdle:     3,
        IdleTimeout: 240 * time.Second,
        DialContext: func(context.Context) (redis.Conn, error) {
            return redis.Dial("tcp", ":6379")
        },
    }
}

var (
    pool *redis.Pool
)

func InitializeRedis() {
    flag.Parse()
    pool = newPool()
}
It doesn't report any error, but I cannot get a connection with pool.Get in another function:
// Handle "/redis" for test
router.GET("/redis", util.ServeHome)
// ServeHome func
func ServeHome(ctx *gin.Context) {
    conn := pool.Get()
    defer conn.Close()

    var p1 struct {
        Title  string `redis:"title" json:"title"`
        Author string `redis:"author" json:"author"`
        Body   string `redis:"body" json:"body"`
    }
    p1.Title = "Example"
    p1.Author = "Gary"
    p1.Body = "Hello"

    if _, err := conn.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil {
        log.Fatalf("Error occurred with redis HMSET, %v", err) // Error in console is from here
        return
    }
(...)
And when I try to access /redis with Insomnia, it shows: Error: Server returned nothing (no headers, no data), and the console logs: Error occurred with redis HMSET, dial tcp :6379: connect: connection refused
I couldn't find any article that solves this problem for me, so I'd appreciate any help.
Since you're using docker-compose, Redis won't be available on :6379; instead it will be available on the hostname redis.
I think you'll need to update your code to the following:
redis.Dial("tcp","redis:6379")
I have a project which consists of a Go application + Nginx + DB (Postgres). All are built in Docker containers.
This is my docker-compose.yml file:
version: "3"
services:
db:
image: postgres:10
environment:
- POSTGRES_PASSWORD=DatabasePassword
- POSTGRES_USER=egor
- POSTGRES_DB=postgres
expose:
- 5432
backend:
build: .
environment:
- POSTGRES_URL=postgres://egor:DatabasePassword#db:5432/postgres?sslmode=disable
- LISTEN_ADDRESS=:5432
depends_on:
- db
proxy:
image: nginx
volumes:
- type: bind
source: ./nginx.conf
target: /etc/nginx/nginx.conf
ports:
- 80:80
depends_on:
- backend
- db
This is my Go application:
package main

import (
    "database/sql"
    "fmt"
    "log"
    "net/http"
    "time"

    _ "github.com/lib/pq"

    "github.com/caarlos0/env"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)
type config struct {
    PostgresUri   string `env:"POSTGRES_URL" envDefault:"postgres://root:pass@localhost:5432/postgres?sslmode=disable"`
    ListenAddress string `env:"LISTEN_ADDRESS" envDefault:":7000"`
    //PostgresHost string `env:"POSTGRES_HOST" envDefault:":l"`
    //PostgresUser string `env:"POSTGRES_USER" envDefault:":root"`
    //PostgresPassword string `env:"POSTGRES_PASSWD" envDefault:":qwerty"`
    //PostgresName string `env:"POSTGRES_NAME" envDefault:":postgres"`
}
var (
    db *sql.DB

    errorsCount = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "gocalc_errors_count",
            Help: "Gocalc Errors Count Per Type",
        },
        []string{"type"},
    )

    requestsCount = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "gocalc_requests_count",
            Help: "Gocalc Requests Count",
        })
)
func main() {
    var err error

    // Initializing Prometheus counters
    prometheus.MustRegister(errorsCount)
    prometheus.MustRegister(requestsCount)

    // Getting env
    cfg := config{}
    if err = env.Parse(&cfg); err != nil {
        fmt.Printf("%+v\n", err)
    }

    time.Sleep(time.Second)
    fmt.Println("Sleep over!")

    // Connecting to the database
    //psqlInfo := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=5432 sslmode=disable",
    //    cfg.PostgresHost,cfg.ListenAddress,cfg.PostgresUser,cfg.PostgresPassword,cfg.PostgresName)
    //db, err := sql.Open("postgres", "host=db user=egor password=DatabasePassword dbname=postgres port=5432 sslmode=disable")
    db, err = sql.Open("postgres", cfg.PostgresUri)
    if err != nil {
        log.Fatalf("Can't connect to postgresql: %v", err)
    }
    defer db.Close()

    err = db.Ping()
    if err != nil {
        log.Fatalf("Can't ping database: %v", err)
    }

    http.HandleFunc("/", handler)
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(cfg.ListenAddress, nil))
}
func handler(w http.ResponseWriter, r *http.Request) {
    requestsCount.Inc()

    keys, ok := r.URL.Query()["q"]
    if !ok || len(keys[0]) < 1 {
        errorsCount.WithLabelValues("missing").Inc()
        log.Println("Url Param 'q' is missing")
        http.Error(w, "Bad Request", 400)
        return
    }

    q := keys[0]
    log.Println("Got query: ", q)

    var result string
    sqlStatement := fmt.Sprintf("SELECT (%s)::numeric", q)
    row := db.QueryRow(sqlStatement)
    err := row.Scan(&result)
    if err != nil {
        log.Printf("Error from db: %s", err)
        errorsCount.WithLabelValues("db").Inc()
        http.Error(w, "Internal Server Error", 500)
        return
    }

    fmt.Fprintf(w, "query %s; result %s", q, result)
}
And my nginx configuration:
events {
    worker_connections 1024;
}
http {
    server {
        listen 80;
        server_name localhost;
        location / {
            proxy_pass http://backend:7000;
        }
    }
}
But when I try the page in a browser, I see an error page: 502 Bad Gateway (nginx).
This is my log:
2022/11/08 23:41:24 [error] 29#29: *1 connect() failed (111: Connection refused) while connecting to upstream, client: xxx.xx.x.x, server: localhost, request: "GET / HTTP/1.1", upstream: "http://xxx.xx.x.x:7000/", host: "0.0.0.0"
What is the problem? All the services work correctly; only the nginx reverse proxy has an error.
I just put together a small project that represents your scenario. This is the repository structure:
webapp/
    nginx/
        Dockerfile
        nginx.conf
    web/
        Dockerfile
        main.go
    docker-compose.yaml
The contents of each file are as follows.
nginx/nginx.conf
events {}
http {
    server {
        listen 80;
        location / {
            proxy_pass http://backend:7000;
        }
    }
}
It is more or less the same as your file.
nginx/Dockerfile
FROM nginx
EXPOSE 80
COPY nginx.conf /etc/nginx/nginx.conf
Here, we specify the instructions to build the nginx container. We expose only port 80.
web/main.go
package main

import (
    "fmt"
    "net/http"
)

func main() {
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintf(w, "Hello, World!\n")
    })
    http.ListenAndServe(":7000", nil)
}
A simple HTTP server with a hard-coded reply, listening for requests on port 7000.
web/Dockerfile
FROM golang:1.12.7-alpine3.10 AS build
WORKDIR /go/src/app
COPY ./main.go ./main.go
RUN go build -o ./bin/gowebserver ./main.go
FROM alpine:latest
COPY --from=build /go/src/app/bin /go/bin
EXPOSE 7000
ENTRYPOINT go/bin/gowebserver
Here, we use a multi-stage build. In the first stage we build the HTTP server, while in the second one we copy the executable onto a leaner base image. We expose port 7000 of this container.
docker-compose.yaml
version: "3"
services:
backend:
build: "./web"
expose:
- "7000"
nginx:
build: "./nginx"
ports:
- "80:80"
depends_on:
- "backend"
Here is the last part that connects everything. We expose only port 80 to the outside. Internally, the backend service exposes port 7000 so it can be contacted by the nginx service.
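One difference worth comparing with your original compose file: nginx proxies to http://backend:7000, but your backend environment sets LISTEN_ADDRESS to :5432, so the Go app never listens on the port nginx targets. If you keep the env-based configuration, the entry has to match the proxied port; a sketch reusing the values from your question:

backend:
  build: .
  environment:
    - POSTGRES_URL=postgres://egor:DatabasePassword@db:5432/postgres?sslmode=disable
    # must match proxy_pass http://backend:7000 in nginx.conf
    - LISTEN_ADDRESS=:7000
  depends_on:
    - db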
To spin up everything, you have to run these two commands (in the root folder of the project):
docker-compose build
docker-compose up
To test this solution, you have to use your internal IP address (in my case it was something like 192.168.1.193) and navigate to http://192.168.1.193/, which should give you a Hello, World! message.
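From a terminal on the same machine, a quick check could look like this (the IP is the one from my network; yours will differ):

curl http://192.168.1.193/
Hello, World!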
Let me know if this solves your issue!
My goal is to create seeds of users when the database is created.
I'm using IdentityServer4 with Npgsql and docker-compose.
The current behavior creates the database as well as the IdentityServer user manager tables (AspNetUsers, AspNetUserTokens, AspNetUserRoles, etc.), so I know it's migrating that data to the database. But it skips over the task of running the user seed because it throws a password exception:
Npgsql.NpgsqlException (0x80004005): No password has been provided but the backend requires one (in MD5)
Here's the code in my Program.cs:
public static void Main(string[] args)
{
    var host = CreateHostBuilder(args).Build();

    using (var scope = host.Services.CreateScope())
    {
        var services = scope.ServiceProvider;
        try
        {
            var userManager = services.GetRequiredService<UserManager<User>>();
            var roleManager = services.GetRequiredService<RoleManager<IdentityRole>>();
            var context = services.GetRequiredService<ApplicationDbContext>();
            context.Database.Migrate(); // ERROR HAPPENS HERE
            Task.Run(async () => await UserAndRoleSeeder.SeedUsersAndRoles(roleManager, userManager)).Wait(); // I NEED THIS TO RUN
        }
        catch (Exception ex)
        {
            var logger = services.GetRequiredService<ILogger<Program>>();
            logger.LogError(ex, "Error has occurred while migrating to the database.");
        }
    }

    host.Run();
}
Here is the code where it gets the connection string in Startup.cs:
services.AddDbContext<ApplicationDbContext>(options =>
{
options.UseNpgsql(Configuration.GetConnectionString("DefaultConnection"),
b =>
{
b.MigrationsAssembly("GLFManager.App");
});
});
If I use a breakpoint here, it shows that the connection string was obtained along with the user id and password. I verified the password is correct; otherwise I don't think it would have initially created the IdentityServer user manager tables.
Here is my appsettings.json file where the connection string lives:
{
  "Logging": {
    "LogLevel": {
      "Default": "Information",
      "Microsoft": "Warning",
      "Microsoft.Hosting.Lifetime": "Information"
    }
  },
  "AllowedHosts": "*",
  "ConnectionStrings": {
    "DefaultConnection": "Host=localhost;Port=33010;Database=glfdb;User Id=devdbuser;Password=devdbpassword"
  }
}
I'm thinking some configuration in the docker-compose file is not registering. This is the docker-compose file:
version: '3.4'
services:
  glfmanager.api:
    image: ${DOCKER_REGISTRY-}glfmanagerapi
    container_name: "glfmanager.api"
    build:
      context: .
      dockerfile: GLFManager.Api/Dockerfile
    ports:
      - "33000:80"
      - "33001:443"
    environment:
      - ConnectionStrings__DefaultConnection=Server=glfmanager.db;Database=glfdb;User Id=devdbuser:password=devdbpassword;
      - Identity_Authority=http://glfmanager.auth
    volumes:
      - .:/usr/src/app
    depends_on:
      - "glfmanager.db"
  glfmanager.auth:
    image: ${DOCKER_REGISTRY-}glfmanagerauth
    container_name: "glfmanager.auth"
    build:
      context: .
      dockerfile: GLFManager.Auth/Dockerfile
    ports:
      - "33005:80"
      - "33006:443"
    environment:
      - ConnectionStrings__DefaultConnection=Server=glfmanager.db;Database=glfdb;User Id=devdbuser:password=devdbpassword;
    volumes:
      - .:/usr/src/app
    depends_on:
      - "glfmanager.db"
  glfmanager.db:
    restart: on-failure
    image: "mdillon/postgis:11"
    container_name: "glfmanager.db"
    environment:
      - POSTGRES_USER=devdbuser
      - POSTGRES_DB=glfdb
      - POSTGRES_PASSWORD=devdbpassword
    volumes:
      - glfmanager-db:/var/lib/postresql/data
    ports:
      - "33010:5432"
volumes:
  glfmanager-db:
I used this code from a class I took on back-end development; it is identical to the project I built there, and that one works. So I'm stumped as to why this is giving me that password error.
Found the problem: I used a ':' instead of a ';' in my docker-compose file between User Id and Password.
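With the separator fixed, the connection-string entries in the compose file read:

- ConnectionStrings__DefaultConnection=Server=glfmanager.db;Database=glfdb;User Id=devdbuser;Password=devdbpassword;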
I use kafka-node for my Node.js code. I have an API for request/response.
First of all, I make a request to http://localhost:3000/number1. Then I start a consumer which consumes messages from partition 0 of the Kafka topic receive.kafka.entities, and I try to find the message with id = number1. Afterwards I want to return a response to the user with this value. So I create a consumer like the following:
const options = {
    kafkaHost: 'kafka:9092'
};

const client_node = new kafka_node.KafkaClient(options);
var Consumer = kafka_node.Consumer;
var consumer_node = new Consumer(
    client_node,
    [
        { topic: 'receive.kafka.entities', partition: 0, offset: 0 }
    ],
    {
        autoCommit: false,
        fetchMaxWaitMs: 100,
        fromOffset: 'earliest',
        groupId: 'kafka-node-group',
        asyncPush: false,
    }
);
const read = (callback) => {
    let ret = "1";
    consumer_node.on('message', async function (message) {
        const parse1 = JSON.parse(message.value);
        const parse2 = JSON.parse(parse1.payload);
        const id = parse2.fullDocument.id;
        const lastOffset = message.highWaterOffset - 1;
        // check if we already have a result or have reached the last offset
        if (lastOffset <= message.offset || ret !== "1") {
            return callback(ret);
        }
        else if (id === back2) { // back2 holds the id from the request path (e.g. "number1")
            ret = parse2.fullDocument;
        }
    });
};
let error = {
    id: "The entity " + back2 + " not found"
};

read((data) => {
    consumer_node.close(true, function (message) {
        if (data != "1") {
            res.status(200).send(data);
        }
        else {
            res.status(404).send(error);
        }
    });
});
If I make continuous requests, after the first request I get this response:
{
"message": "Broker not available (loadMetadataForTopics)"
}
My first docker-compose file is the following:
zookeeper:
  image: confluentinc/cp-zookeeper:5.4.1
  container_name: stellio-zookeeper
  ports:
    - 2181:2181
  environment:
    ZOOKEEPER_SERVER_ID: 1
    ZOOKEEPER_CLIENT_PORT: 2181
    ZOOKEEPER_TICK_TIME: 2000
  networks:
    - default
    - localnet

kafka:
  image: confluentinc/cp-enterprise-kafka:latest
  container_name: kafka
  ports:
    - 9092:9092
    - 9101:9101
  environment:
    KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9092,EXTERNAL://localhost:29092
    KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
    KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
    KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
    KAFKA_BROKER_ID: 1
    KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
    KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
    KAFKA_JMX_PORT: 9101
    KAFKA_JMX_HOSTNAME: localhost
    CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:9092
    CONFLUENT_METRICS_ENABLE: 'true'
    CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
    KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
  depends_on:
    - zookeeper
  networks:
    - default
    - localnet
    - my-proxy-net-kafka

networks:
  default: # this network (app2)
    driver: bridge
  my-proxy-net-kafka:
    external:
      name: kafka_network
My second docker-compose file:
app:
  container_name: docker-node
  hostname: docker-node
  restart: always
  build: .
  command: nodemon /usr/src/app/index.js
  networks:
    - default
    - proxynet-kafka
  ports:
    - '3000:3000'
  volumes:
    - .:/usr/src/app

networks:
  default:
    driver: bridge
  proxynet-kafka:
    name: kafka_network
Why does that happen? Can you help me fix it?
[If you want more information, feel free to ask me :)]
We have a docker-compose setup connecting a Golang mail service with a MailHog service. Below is the MailHog configuration in our docker-compose.yml:
mailhog:
  image: mailhog/mailhog:latest
  restart: always
  ports:
    - 1025:1025
    - 8025:8025
  networks:
    mercury:
      aliases:
        - mailhog.local
We are using the gopkg.in/gomail.v2 package. Below is the function from the Golang mail service within our docker-compose setup; this is the function that returns the error:
func (e *Email) attemptToSendMessage(m structs.Message) {
    var s gomail.SendCloser
    var err error

    // If we want to disable any emails being sent, we have set an ENV var to disallow it.
    if !e.DisableEmail {
        d := gomail.NewDialer(e.smtpServer, e.smtpPort, e.smtpUser, e.smtpPassword)
        if s, err = d.Dial(); err != nil {
            log.WithFields(log.F("error", err)).Notice("Issue connecting to smtp server")
            e.failMessage(m)
            return
        }

        if err = gomail.Send(s, e.parseMessage(m)); err != nil {
            log.WithFields(log.F("error", err)).Notice("ERROR sending to smtp server, retrying")
            e.failMessage(m)
            s.Close()
            return
        }
        s.Close()
    }
}
When we attempt to connect, though, we get the following error message:
Dec 08 08:25:18 35.183.142.45 golang.mail.app.instance 2020-12-08T13:25:18.758599257Z NOTICE Issue connecting to smtp server error=unencrypted connection
Message returned from the MailHog app:
Dec 08 08:25:18 35.183.142.45 mailhog.instance 2020/12/08 13:25:18 [SMTP 172.29.0.4:33526] Sent 16 bytes: '250 AUTH PLAIN\r\n'
Dec 08 08:25:18 35.183.142.45 mailhog.instance 2020/12/08 13:25:18 [SMTP 172.29.0.4:33526] Received 6 bytes: 'QUIT\r\n'
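For reference, gomail only attempts SMTP AUTH when a username is set, and net/smtp's PlainAuth refuses to authenticate over an unencrypted connection to a non-localhost host, which matches the "unencrypted connection" error above. A minimal sketch of dialing MailHog without credentials, assuming the default MailHog image (which accepts unauthenticated, unencrypted SMTP on port 1025) and "mailhog" as the compose service name:

package main

import (
    "log"

    "gopkg.in/gomail.v2"
)

func main() {
    m := gomail.NewMessage()
    m.SetHeader("From", "noreply@example.com")
    m.SetHeader("To", "dev@example.com")
    m.SetHeader("Subject", "test")
    m.SetBody("text/plain", "hello from gomail")

    // Empty user/password: gomail skips SMTP AUTH entirely, so the
    // PlainAuth "unencrypted connection" check never runs.
    d := gomail.NewDialer("mailhog", 1025, "", "")
    if err := d.DialAndSend(m); err != nil {
        log.Fatal(err)
    }
}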
My tests are running against a Docker grid with Selenium Docker images for the hub and Chrome. What I am trying to do is access the Chrome DevTools Protocol in the Chrome node so that I can access/intercept a request. Any help is appreciated.
I was able to get it working without Docker on my local machine, but I could not figure out a way to connect to DevTools in the Chrome node of the Docker grid. Below are my docker-compose file and code.
docker-compose:
version: "3.7"
services:
selenium_hub_ix:
container_name: selenium_hub_ix
image: selenium/hub:latest
environment:
SE_OPTS: "-port 4445"
ports:
- 4445:4445
chrome_ix:
image: selenium/node-chrome-debug:latest
container_name: chrome_node_ix
depends_on:
- selenium_hub_ix
ports:
- 5905:5900
- 5903:5555
- 9222:9222
environment:
- no_proxy=localhost
- HUB_PORT_4444_TCP_ADDR=selenium_hub_ix
- HUB_PORT_4444_TCP_PORT=4445
- NODE_MAX_INSTANCES=5
- NODE_MAX_SESSION=5
- TZ=America/Chicago
volumes:
- /dev/shm:/dev/shm
Here is sample code showing how I got it working locally without the grid (ChromeDriver on my Mac):
const CDP = require('chrome-remote-interface');
let webDriver = require("selenium-webdriver");

module.exports = {
    async openBrowser() {
        this.driver = await new webDriver.Builder().forBrowser("chrome").build();
        let session = await this.driver.session_;
        let debuggerAddress = await session.caps_.map_.get("goog:chromeOptions").debuggerAddress;
        let AddressString = debuggerAddress.split(":");
        console.log(AddressString);
        try {
            // Connect to the DevTools endpoint exposed by the local ChromeDriver.
            const protocol = await CDP({
                port: AddressString[1]
            });
            const {
                Network,
                Fetch
            } = protocol;
            await Fetch.enable({
                patterns: [{
                    urlPattern: "*",
                }]
            });
            // Log every paused request.
            await Fetch.requestPaused(async ({
                interceptionId,
                request
            }) => {
                console.log(request);
            });
        } catch (err) {
            console.log(err.message);
        }
        return this.driver;
    },
};
When using the grid, I just change the way I build the driver to the following:
this.driver = await new webDriver.Builder().usingServer(process.env.SELENIUM_HUB_IP).withCapabilities(webDriver.Capabilities.chrome()).build();
With that I am getting the port number, but I could not create a CDP session and I get a connection refused error.