We have a Docker Compose setup that connects a Golang mail service to a MailHog service. Below is the MailHog configuration in our docker-compose.yml:
mailhog:
  image: mailhog/mailhog:latest
  restart: always
  ports:
    - 1025:1025
    - 8025:8025
  networks:
    mercury:
      aliases:
        - mailhog.local
We are using the gopkg.in/gomail.v2 package. Below is the function from the Golang mail service in our Compose stack; this is the function that is returning the error:
func (e *Email) attemptToSendMessage(m structs.Message) {
	var s gomail.SendCloser
	var err error
	// If we want to disable any emails being sent, we set an ENV var to disallow it.
	if !e.DisableEmail {
		d := gomail.NewDialer(e.smtpServer, e.smtpPort, e.smtpUser, e.smtpPassword)
		if s, err = d.Dial(); err != nil {
			log.WithFields(log.F("error", err)).Notice("Issue connecting to smtp server")
			e.failMessage(m)
			return
		}
		if err = gomail.Send(s, e.parseMessage(m)); err != nil {
			log.WithFields(log.F("error", err)).Notice("ERROR sending to smtp server, retrying")
			e.failMessage(m)
			s.Close()
			return
		}
		s.Close()
	}
}
When we attempt to connect, though, we get the following error message:
Dec 08 08:25:18 35.183.142.45 golang.mail.app.instance 2020-12-08T13:25:18.758599257Z NOTICE Issue connecting to smtp server error=unencrypted connection
Message returned from the MailHog app:
Dec 08 08:25:18 35.183.142.45 mailhog.instance 2020/12/08 13:25:18 [SMTP 172.29.0.4:33526] Sent 16 bytes: '250 AUTH PLAIN\r\n'
Dec 08 08:25:18 35.183.142.45 mailhog.instance 2020/12/08 13:25:18 [SMTP 172.29.0.4:33526] Received 6 bytes: 'QUIT\r\n'
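For context, the 250 AUTH PLAIN reply in the MailHog log means gomail attempts PLAIN authentication, and Go's net/smtp refuses to send PLAIN credentials over an unencrypted connection to any host other than localhost, which matches the unencrypted connection error above. A minimal sketch of one possible workaround, assuming MailHog is running with its default configuration (no authentication required): dial without credentials so gomail never negotiates AUTH.
// Sketch only: empty user/password means gomail skips the AUTH step entirely.
// e.smtpServer is assumed to resolve to the compose alias mailhog.local and e.smtpPort to 1025.
d := gomail.NewDialer(e.smtpServer, e.smtpPort, "", "")
s, err := d.Dial()
if err != nil {
	log.WithFields(log.F("error", err)).Notice("Issue connecting to smtp server")
	e.failMessage(m)
	return
}
defer s.Close()

if err := gomail.Send(s, e.parseMessage(m)); err != nil {
	log.WithFields(log.F("error", err)).Notice("ERROR sending to smtp server, retrying")
	e.failMessage(m)
}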
Related
I have a project which consists of a Go application + Nginx + DB (Postgres). All are built in Docker containers.
This is my docker-compose.yml file:
version: "3"
services:
db:
image: postgres:10
environment:
- POSTGRES_PASSWORD=DatabasePassword
- POSTGRES_USER=egor
- POSTGRES_DB=postgres
expose:
- 5432
backend:
build: .
environment:
      - POSTGRES_URL=postgres://egor:DatabasePassword@db:5432/postgres?sslmode=disable
      - LISTEN_ADDRESS=:5432
    depends_on:
      - db
  proxy:
    image: nginx
    volumes:
      - type: bind
        source: ./nginx.conf
        target: /etc/nginx/nginx.conf
    ports:
      - 80:80
    depends_on:
      - backend
      - db
This is my Go application:
package main

import (
	"database/sql"
	"fmt"
	"time"
	_ "github.com/lib/pq"
	"log"
	"net/http"
	"github.com/caarlos0/env"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type config struct {
	PostgresUri   string `env:"POSTGRES_URL" envDefault:"postgres://root:pass@localhost:5432/postgres?sslmode=disable"`
	ListenAddress string `env:"LISTEN_ADDRESS" envDefault:":7000"`
	//PostgresHost string `env:"POSTGRES_HOST" envDefault:":l"`
	//PostgresUser string `env:"POSTGRES_USER" envDefault:":root"`
	//PostgresPassword string `env:"POSTGRES_PASSWD" envDefault:":qwerty"`
	//PostgresName string `env:"POSTGRES_NAME" envDefault:":postgres"`
}

var (
	db          *sql.DB
	errorsCount = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "gocalc_errors_count",
			Help: "Gocalc Errors Count Per Type",
		},
		[]string{"type"},
	)
	requestsCount = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "gocalc_requests_count",
			Help: "Gocalc Requests Count",
		})
)

func main() {
	var err error

	// Initing prometheus
	prometheus.MustRegister(errorsCount)
	prometheus.MustRegister(requestsCount)

	// Getting env
	cfg := config{}
	if err = env.Parse(&cfg); err != nil {
		fmt.Printf("%+v\n", err)
	}

	time.Sleep(time.Second)
	fmt.Println("Sleep over!")

	// Connecting to database
	//psqlInfo := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=5432 sslmode=disable",
	//	cfg.PostgresHost,cfg.ListenAddress,cfg.PostgresUser,cfg.PostgresPassword,cfg.PostgresName)
	//db, err := sql.Open("postgres", "host=db user=egor password=DatabasePassword dbname=postgres port=5432 sslmode=disable")
	db, err = sql.Open("postgres", cfg.PostgresUri)
	if err != nil {
		log.Fatalf("Can't connect to postgresql: %v", err)
	}
	defer db.Close()

	err = db.Ping()
	if err != nil {
		log.Fatalf("Can't ping database: %v", err)
	}

	http.HandleFunc("/", handler)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(cfg.ListenAddress, nil))
}

func handler(w http.ResponseWriter, r *http.Request) {
	requestsCount.Inc()

	keys, ok := r.URL.Query()["q"]
	if !ok || len(keys[0]) < 1 {
		errorsCount.WithLabelValues("missing").Inc()
		log.Println("Url Param 'q' is missing")
		http.Error(w, "Bad Request", 400)
		return
	}
	q := keys[0]
	log.Println("Got query: ", q)

	var result string
	sqlStatement := fmt.Sprintf("SELECT (%s)::numeric", q)
	row := db.QueryRow(sqlStatement)
	err := row.Scan(&result)
	if err != nil {
log.Println("Error from db: %s", err)
errorsCount.WithLabelValues("db").Inc()
http.Error(w, "Internal Server Error", 500)
return
}
fmt.Fprintf(w, "query %s; result %s", q, result)
}
And my nginx configuration:
events {
    worker_connections 1024;
}

http {
    server {
        listen 80;
        server_name localhost;

        location / {
            proxy_pass http://backend:7000;
        }
    }
}
But when I try the page in a browser, I see an error page: 502 Bad Gateway (nginx).
This is my log:
2022/11/08 23:41:24 [error] 29#29: *1 connect() failed (111: Connection refused) while connecting to upstream, client: xxx.xx.x.x, server: localhost, request: "GET / HTTP/1.1", upstream: "http://xxx.xx.x.x:7000/", host: "0.0.0.0"
What is the problem? All services work correctly; only the nginx reverse proxy has an error.
I just put together a small project that represents your scenario. This is the repository structure:
webapp/
    nginx/
        Dockerfile
        nginx.conf
    web/
        Dockerfile
        main.go
    docker-compose.yaml
The content of each file is as follows.
nginx/nginx.conf
events{}
http {
server {
listen 80;
location / {
proxy_pass http://backend:7000;
}
}
}
It is more or less the same as your file.
nginx/Dockerfile
FROM nginx
EXPOSE 80
COPY nginx.conf /etc/nginx/nginx.conf
Here we specify the instructions to build the nginx container. We expose only port 80.
web/main.go
package main
import (
"fmt"
"net/http"
)
func main() {
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, World!\n")
})
http.ListenAndServe(":7000", nil)
}
A simple HTTP server with a hard-coded reply; it listens for requests on port 7000.
web/Dockerfile
FROM golang:1.12.7-alpine3.10 AS build
WORKDIR /go/src/app
COPY ./main.go ./main.go
RUN go build -o ./bin/gowebserver ./main.go
FROM alpine:latest
COPY --from=build /go/src/app/bin /go/bin
EXPOSE 7000
ENTRYPOINT go/bin/gowebserver
Here we use a multi-stage build. In the first stage we build the HTTP server, while in the second we copy the executable onto a leaner base image. We expose port 7000 of this container.
docker-compose.yaml
version: "3"
services:
backend:
build: "./web"
expose:
- "7000"
nginx:
build: "./nginx"
ports:
- "80:80"
depends_on:
- "backend"
Here is the last part that connects everything. We expose only port 80 to the outside. Internally, the backend service exposes port 7000 so that the nginx service can contact it.
To spin everything up, you have to run these two commands (from the root folder of the project):
docker-compose build
docker-compose up
To test this solution you have to use your internal IP address (in my case it was something like 192.168.1.193) and navigate to http://192.168.1.193/, which should give you a Hello, World! message.
Let me know if this solves your issue!
I am using go-redis to connect to a Redis server running on Docker Desktop while running my Go app directly on my Mac.
This is my client setup:
package redis
import (
	"fmt"
	"os"

	"github.com/go-redis/redis/v8"
)

var redisClient *RedisClient

type RedisClient struct {
	*redis.Client
}

func GetRedisClient() *RedisClient {
	if redisClient != nil {
		return redisClient
	}

	host := os.Getenv("REDIS_HOST")
	port := os.Getenv("REDIS_PORT")
	password := os.Getenv("REDIS_PASS")

	client := redis.NewClient(&redis.Options{
		Addr:     fmt.Sprintf("%s:%s", host, port),
		Password: password, // no password set
		DB:       0,
	})

	redisClient = &RedisClient{
		Client: client,
	}

	return redisClient
}
Docker:
version: "3.8"
services:
redis:
container_name: redis
image: redis:6.2
ports:
- "6379:6379"
ulimits:
nofile:
soft: 65536
hard: 65536
The app exposes websocket connections to drivers, which report their current location every second; the app then saves the locations in Redis using GEOADD.
The app also exposes another set of websocket connections to the same drivers for general notifications, if any, using BRPOP.
After about 70 drivers' websocket connections, I get errors from the extra drivers trying to connect. The errors come from the function that saves the location to Redis. The errors I get:
dial tcp [::1]:6379: socket: too many open files
and sometimes: dial tcp: lookup localhost: no such host
func (r *RedisClient) SetPoint(ctx context.Context, item *Identifier, loc *Location) error {
	geoLocation := &redis.GeoLocation{Name: item.Id, Latitude: loc.Lat, Longitude: loc.Lng}
	if err := r.GeoAdd(ctx, item.key(), geoLocation).Err(); err != nil {
		fmt.Println("error adding geo", err)
		return errors.New("failed to set point")
	}
	return nil
}
For general notifications (the timeout on the pulling is zero, meaning infinite):
type DriverData struct {
	Status   OrderStatusType `json:"status,omitempty"`
	DriverId uint            `json:"driver_id,omitempty"`
	UserId   uint            `json:"user_id,omitempty"`
}

func (config *Config) DriverOrderStatus(c *gin.Context) {
	driverID := utils.ToUint(auth.GetToken(c).Subject)
	ctx := c.Request.Context()

	// order := models.GetOrder(config.Db)
	// var _ = order.GetActiveOrderForUser(driverID)

	wsconn, err := websocket.Accept(c.Writer, c.Request, &websocket.AcceptOptions{InsecureSkipVerify: true})
	if err != nil {
		return
	}

	// if order.ID != 0 {
	// 	var _ = wsjson.Write(ctx, wsconn, &UserData{Order: order, Status: order.Status, Driver: order.Driver})
	// } else {
	// 	var _ = wsjson.Write(ctx, wsconn, &UserData{ResetOrder: true})
	// }

	defer wsconn.Close(websocket.StatusInternalError, "")

	closeRead := wsconn.CloseRead(ctx)

	driverDataCh := make(chan *DriverData, 1000)
	go func() {
	loop:
		for {
			select {
			case <-closeRead.Done():
				break loop
			default:
				if status, err := config.Redis.DriverPullStatus(ctx, driverID); err == nil {
					driverDataCh <- &DriverData{Status: status.Status, DriverId: status.DriverID, UserId: status.UserID}
				}
			}
		}
		fmt.Println("redis pulling data is over")
	}()

loop:
	for {
		select {
		case <-closeRead.Done():
			break loop
		case driverData := <-driverDataCh:
			if err := wsjson.Write(ctx, wsconn, driverData); err != nil {
				break loop
			}
		}
	}
	fmt.Println("sending updates to user is over")
}
This is Redis server info:
127.0.0.1:6379> info
# Server
redis_version:6.2.6
redis_git_sha1:00000000
redis_git_dirty:0
redis_build_id:a0adc3471b8cfa72
redis_mode:standalone
os:Linux 5.10.47-linuxkit x86_64
arch_bits:64
multiplexing_api:epoll
atomicvar_api:atomic-builtin
gcc_version:10.3.1
process_id:1
process_supervised:no
run_id:d92005e2ccb89ea8e3be57e3bb1b79e0e323c2a7
tcp_port:6379
server_time_usec:1654937072463352
uptime_in_seconds:325287
uptime_in_days:3
hz:10
configured_hz:10
lru_clock:10769904
executable:/data/redis-server
config_file:
io_threads_active:0
# Clients
connected_clients:104
cluster_connections:0
maxclients:10000
client_recent_max_input_buffer:48
client_recent_max_output_buffer:0
blocked_clients:81
tracking_clients:0
clients_in_timeout_table:0
# Memory
used_memory:3081168
used_memory_human:2.94M
used_memory_rss:5791744
used_memory_rss_human:5.52M
used_memory_peak:5895528
used_memory_peak_human:5.62M
used_memory_peak_perc:52.26%
used_memory_overhead:2944804
used_memory_startup:809880
used_memory_dataset:136364
used_memory_dataset_perc:6.00%
allocator_allocated:3166992
allocator_active:3862528
allocator_resident:6742016
total_system_memory:4125036544
total_system_memory_human:3.84G
used_memory_lua:37888
used_memory_lua_human:37.00K
used_memory_scripts:0
used_memory_scripts_human:0B
number_of_cached_scripts:0
maxmemory:0
maxmemory_human:0B
maxmemory_policy:noeviction
allocator_frag_ratio:1.22
allocator_frag_bytes:695536
allocator_rss_ratio:1.75
allocator_rss_bytes:2879488
rss_overhead_ratio:0.86
rss_overhead_bytes:-950272
mem_fragmentation_ratio:1.88
mem_fragmentation_bytes:2712392
mem_not_counted_for_evict:0
mem_replication_backlog:0
mem_clients_slaves:0
mem_clients_normal:2134588
mem_aof_buffer:0
mem_allocator:jemalloc-5.1.0
active_defrag_running:0
lazyfree_pending_objects:0
lazyfreed_objects:0
# Persistence
loading:0
current_cow_size:0
current_cow_size_age:0
current_fork_perc:0.00
current_save_keys_processed:0
current_save_keys_total:0
rdb_changes_since_last_save:3636
rdb_bgsave_in_progress:0
rdb_last_save_time:1654936992
rdb_last_bgsave_status:ok
rdb_last_bgsave_time_sec:1
rdb_current_bgsave_time_sec:-1
rdb_last_cow_size:450560
aof_enabled:0
aof_rewrite_in_progress:0
aof_rewrite_scheduled:0
aof_last_rewrite_time_sec:-1
aof_current_rewrite_time_sec:-1
aof_last_bgrewrite_status:ok
aof_last_write_status:ok
aof_last_cow_size:0
module_fork_in_progress:0
module_fork_last_cow_size:0
# Stats
total_connections_received:1271
total_commands_processed:296750
instantaneous_ops_per_sec:45
total_net_input_bytes:27751095
total_net_output_bytes:1254190
instantaneous_input_kbps:4.16
instantaneous_output_kbps:12.46
rejected_connections:0
sync_full:0
sync_partial_ok:0
sync_partial_err:0
expired_keys:0
expired_stale_perc:0.00
expired_time_cap_reached_count:0
expire_cycle_cpu_milliseconds:18136
evicted_keys:0
keyspace_hits:10
keyspace_misses:3567
pubsub_channels:0
pubsub_patterns:0
latest_fork_usec:6453
total_forks:41
migrate_cached_sockets:0
slave_expires_tracked_keys:0
active_defrag_hits:0
active_defrag_misses:0
active_defrag_key_hits:0
active_defrag_key_misses:0
tracking_total_keys:0
tracking_total_items:0
tracking_total_prefixes:0
unexpected_error_replies:0
total_error_replies:6
dump_payload_sanitizations:0
total_reads_processed:297924
total_writes_processed:295658
io_threaded_reads_processed:0
io_threaded_writes_processed:0
# Replication
role:master
connected_slaves:0
master_failover_state:no-failover
master_replid:fc426cf72670e6ad09221bcb9c3423a1e1fab47e
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:0
second_repl_offset:-1
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
repl_backlog_histlen:0
# CPU
used_cpu_sys:428.939381
used_cpu_user:123.850311
used_cpu_sys_children:0.755309
used_cpu_user_children:0.065924
used_cpu_sys_main_thread:428.485425
used_cpu_user_main_thread:123.679341
# Modules
# Errorstats
errorstat_ERR:count=6
# Cluster
cluster_enabled:0
# Keyspace
db0:keys=6,expires=0,avg_ttl=0
After a lot of searching, it turns out this is caused by the maximum number of open file descriptors. Every websocket connection opens a file descriptor, and every machine has a limit; on Linux/Unix this is defined under ulimit.
More on that in this article.
To update the ulimit on macOS, refer to this post.
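To confirm from inside the Go app itself whether the per-process limit is the bottleneck, the limit can be read (and the soft limit raised up to the hard limit) via the syscall package. This is a rough sketch, not part of the original answer, assuming Linux or macOS; the hard limit itself still has to be raised from the shell or launchd.
package main

import (
	"fmt"
	"log"
	"syscall"
)

func main() {
	var rl syscall.Rlimit
	// Read the current per-process limit on open file descriptors (sockets count too).
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		log.Fatalf("getrlimit: %v", err)
	}
	fmt.Printf("open file descriptors: soft=%d hard=%d\n", rl.Cur, rl.Max)

	// Raise the soft limit up to the hard limit; on macOS this can fail if the
	// hard limit exceeds the kernel cap (kern.maxfilesperproc).
	rl.Cur = rl.Max
	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		log.Printf("setrlimit: %v", err)
	}
}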
I have a dockerized back-end with a Golang Gin server, PostgreSQL and Redis.
Everything starts correctly with this docker-compose.yaml file:
version: '3.9'
services:
  postgresql:
    image: 'postgres:13.1-alpine'
    volumes:
      - data:/var/lib/postgresql/data
    env_file:
      - ./env/postgre.env
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
    ports:
      - '5432:5432'
  server:
    build: ./server
    ports:
      - '8000:8000'
    volumes:
      - ./server:/app
    depends_on:
      - postgresql
  redis:
    image: "redis"
    ports:
      - "6379:6379"
    volumes:
      - $PWD/redis-data:/var/lib/redis
volumes:
  data:
Then I initialize Redis in the main func:
func main() {
	util.InitializeRedis()
	(...)

// InitializeRedis func
func newPool() *redis.Pool {
	return &redis.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		DialContext: func(context.Context) (redis.Conn, error) {
			return redis.Dial("tcp", ":6379")
		},
	}
}

var (
	pool *redis.Pool
)

func InitializeRedis() {
	flag.Parse()
	pool = newPool()
}
It doesn't report any error, but I cannot get a connection with pool.Get in another function:
// Handle "/redis" for test
router.GET("/redis", util.ServeHome)
// ServeHome func
func ServeHome(ctx *gin.Context) {
	conn := pool.Get()
	defer conn.Close()

	var p1 struct {
		Title  string `redis:"title" json:"title"`
		Author string `redis:"author" json:"author"`
		Body   string `redis:"body" json:"body"`
	}
	p1.Title = "Example"
	p1.Author = "Gary"
	p1.Body = "Hello"

	if _, err := conn.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil {
		log.Fatalf("Error occurred with redis HMSET, %v", err) // Error in console is from here
		return
	}
	(...)
And when I try to access /redis with Insomnia it shows Error: Server returned nothing (no headers, no data), and the console logs: Error occurred with redis HMSET, dial tcp :6379: connect: connection refused
I couldn't find any article which solves this problem for me, so I would appreciate any help.
Since you're using docker-compose, Redis won't be available on :6379 (localhost) from the server container; instead it will be available at the hostname redis.
I think you'll need to update your code to the following:
redis.Dial("tcp","redis:6379")
I have an error where I cannot connect from my Go application to my Redis container. It was working for at least two weeks, until I restarted my PC. I don't see what could have changed, and I ensured that no other containers or processes are using port 6379.
My error:
panic: failed to load incr lua script: EOF
goroutine 1 [running]:
code/website.connectToRedisLimiterDatabase(0x0, 0x0)
I can connect to the Redis container via the CLI:
//exec inside
docker exec -it container-name redis-cli
// set value
set name "test"
// get value
get name
// shows test
Here is where I get the error in my Go code:
redisLimiter "github.com/ulule/limiter/v3/drivers/store/redis"
redisSessions "github.com/rbcervilla/redisstore/v8"
// RedisLimiterInstance contains the Redis limiter client and store objects
type RedisLimiterInstance struct {
Client redisLimiter.Client
Store limiter.Store
}
// RedisSessionInstance contains the Redis session client and store objects
type RedisSessionInstance struct {
Client *redis.Client
Store *redisSessions.RedisStore
}
var redisLimiterInstance RedisLimiterInstance
var redisSessionInstance RedisSessionInstance
func connectToRedisLimiterDatabase() error {
redisLimiterClient := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
})
store, err := redisLimiter.NewStoreWithOptions(redisLimiterClient, limiter.StoreOptions{
Prefix: "rate_limiter_",
MaxRetry: 3,
})
if err != nil {
panic(err)
}
// panic: failed to load incr lua script: EOF
redisLimiterInstance = RedisLimiterInstance{
Client: redisLimiterClient,
Store: store,
}
return nil
}
func connectToRedisSessionDatabase() error {
redisSessionClient := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
})
// New default RedisStore
store, err := redisSessions.NewRedisStore(context.Background(), redisSessionClient)
if err != nil {
log.Fatal("failed to create redis store: ", err)
}
store.KeyPrefix("session_")
store.Options(sessions.Options{
Path: "/",
MaxAge: 86400 * 7,
HttpOnly: false,
})
redisSessionInstance = RedisSessionInstance{
Client: redisSessionClient,
Store: store,
}
return nil
}
Via Docker Desktop, the container logs show:
Configuration loaded
Running mode=standalone, port=6379.
Server initialized
Ready to accept connections
My conf file is just:
bind 127.0.0.1
port 6379
and the Dockerfile itself is:
FROM redis:6.0.9
COPY redis.conf /usr/local/etc/redis/redis.conf
CMD [ "redis-server", "/usr/local/etc/redis/redis.conf" ]
Any thoughts?
Binding to the address 127.0.0.1 in the redis container would only allow connections from within the container. Either remove the line or bind to 0.0.0.0 so redis can bind to all interfaces instead of just the loopback.
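Once the bind line is fixed or removed, a quick way to verify connectivity at startup, before the limiter tries to load its Lua script, is a plain PING. This is only a sketch, assuming the same go-redis v8 client already used above:
// Sketch only: fail fast with a clear error instead of the limiter's EOF panic.
client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
if err := client.Ping(context.Background()).Err(); err != nil {
	log.Fatalf("cannot reach redis on localhost:6379: %v", err)
}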
I'm using Go Micro, Docker and Traefik to deploy my service. I deployed the go-micro service and registered it with Traefik. This is my sum (gRPC service) status in the Traefik dashboard. When I curl it in the terminal, I get this result, which I assume is the gRPC message in binary. But when I use this code:
package main

import (
	"context"
	"fmt"
	proto "gomicro-demo/client/service"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"log"
)

func main() {
	con, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatal("Connection error: ", err)
	}

	md := metadata.New(map[string]string{"Host": "sum.traefik"})
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	service := proto.NewSumClient(con)
	res, err2 := service.GetSum(ctx, &proto.Request{})
	if err2 == nil {
		fmt.Println(res)
	} else {
		log.Fatal("Call error:", err2)
	}
}
I got this error: rpc error: code = Unimplemented desc = Not Found: HTTP status code 404; transport: received the unexpected content-type "text/plain; charset=utf-8". I can't tell how this error happens, whether it is because of the address or the gRPC metadata (Host header). Please help me with this problem. Thank you very much!
You can expose TCP like this. Please use Traefik 2; HostSNI must be set:
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
  name: redis
spec:
  entryPoints:
    - redis
  routes:
    - match: HostSNI(`*`)
      services:
        - name: redis
          port: 6379
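The idea for the sum service would be the same: route it through an IngressRouteTCP (instead of an HTTP router) and point the client at the published TCP entrypoint. A rough sketch of the client side, assuming a hypothetical entrypoint published on localhost:8080 that routes to the sum service; with TCP routing there is no HTTP router in the path, so the Host metadata should no longer be needed:
// Sketch only: the entrypoint address is hypothetical and depends on your Traefik static config.
con, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
if err != nil {
	log.Fatal("Connection error: ", err)
}
defer con.Close()

service := proto.NewSumClient(con)
res, err := service.GetSum(context.Background(), &proto.Request{})
if err != nil {
	log.Fatal("Call error: ", err)
}
fmt.Println(res)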