Puphpet vagrant for a ZF2 application - zend-framework2

I'm trying to create a VM for developing a Zend Framework 2 application.
I'm not very comfortable with Vagrant, so I'm using Puphpet to generate the configuration.
But I'm currently running into a number of different problems.
One note: I'm on a Mac (10.11.1) and I'm using VirtualBox (5.0.6).
First, here is my Puphpet configuration file:
vagrantfile:
  target: local
  vm:
    box: puphpet/ubuntu1404-x64
    box_url: puphpet/ubuntu1404-x64
    hostname: local.blog
    memory: '1024'
    cpus: '2'
    chosen_provider: virtualbox
    network:
      private_network: 192.168.56.101
      forwarded_port:
        vflnp_nfdzn8w8a4h3:
          host: '6757'
          guest: '22'
    post_up_message: ''
    provider:
      virtualbox:
        modifyvm:
          natdnshostresolver1: 'on'
        showgui: '0'
      vmware:
        numvcpus: 1
      parallels:
        cpus: 1
    provision:
      puppet:
        manifests_path: puphpet/puppet
        manifest_file: site.pp
        module_path: puphpet/puppet/modules
        options:
          - '--verbose'
          - '--hiera_config /vagrant/puphpet/puppet/hiera.yaml'
          - '--parser future'
    synced_folder:
      vflsf_knbv8t2xmfrp:
        source: ./
        target: /var/www/
        sync_type: nfs
        smb:
          smb_host: ''
          smb_username: ''
          smb_password: ''
        rsync:
          args:
            - '--verbose'
            - '--archive'
            - '-z'
          exclude:
            - .vagrant/
            - .git/
          auto: 'true'
        owner: www-data
        group: www-data
    usable_port_range:
      start: 10200
      stop: 10500
  ssh:
    host: null
    port: null
    private_key_path: null
    username: vagrant
    guest_port: null
    keep_alive: true
    forward_agent: false
    forward_x11: false
    shell: 'bash -l'
  vagrant:
    host: detect
server:
  install: '1'
  packages: { }
users_groups:
  install: '1'
  groups: { }
  users: { }
locale:
  install: '1'
  settings:
    default_locale: fr_FR.UTF-8
    locales:
      - fr_FR.UTF-8
firewall:
  install: '1'
  rules: { }
cron:
  install: '1'
  jobs: { }
nginx:
  install: '0'
  settings:
    default_vhost: 1
    proxy_buffer_size: 128k
    proxy_buffers: '4 256k'
  upstreams: { }
  vhosts:
    nxv_j6ygd9l3rlpb:
      server_name: local.recipes
      server_aliases:
        - local.recipes
      www_root: /var/www/public
      listen_port: '80'
      index_files:
        - index.html
        - index.htm
        - index.php
      client_max_body_size: 1m
      ssl: '0'
      ssl_cert: ''
      ssl_key: ''
      ssl_port: '443'
      ssl_protocols: ''
      ssl_ciphers: ''
      rewrite_to_https: '1'
      spdy: '1'
      locations:
        nxvl_zdupydqyeec0:
          location: /
          autoindex: 'off'
          internal: 'false'
          try_files:
            - $uri
            - $uri/
            - /index.php$is_args$args
          fastcgi: ''
          fastcgi_index: ''
          fastcgi_split_path: ''
        nxvl_w6tj6ii33ek6:
          location: '~ \.php$'
          autoindex: 'off'
          internal: 'false'
          try_files:
            - $uri
            - $uri/
            - /index.php$is_args$args
          fastcgi: '127.0.0.1:9000'
          fastcgi_index: index.php
          fastcgi_split_path: '^(.+\.php)(/.*)$'
          fast_cgi_params_extra:
            - 'SCRIPT_FILENAME $request_filename'
            - 'APP_ENV dev'
  proxies: { }
apache:
  install: '1'
  settings:
    user: www-data
    group: www-data
    default_vhost: true
    manage_user: false
    manage_group: false
    sendfile: 0
  modules:
    - proxy_fcgi
    - rewrite
  vhosts:
    av_tordbapk4fv1:
      servername: local.blog
      serveraliases:
        - www.local.blog
      docroot: /var/www/blog/public
      port: '80'
      setenv:
        - 'APP_ENV dev'
      custom_fragment: ''
      ssl: '0'
      ssl_cert: ''
      ssl_key: ''
      ssl_chain: ''
      ssl_certs_dir: ''
      ssl_protocol: ''
      ssl_cipher: ''
php:
  install: '1'
  settings:
    version: '70'
  modules:
    php:
      - cli
      - intl
      - mcrypt
    pear: { }
    pecl: { }
  ini:
    display_errors: 'On'
    error_reporting: '-1'
    session.save_path: /var/lib/php/session
    date.timezone: UTC
  fpm_ini:
    error_log: /var/log/php-fpm.log
  fpm_pools:
    phpfp_8dskxs4sc6bp:
      ini:
        prefix: www
        listen: '127.0.0.1:9000'
        security.limit_extensions: .php
        user: www-user
        group: www-data
  composer: '1'
  composer_home: ''
xdebug:
  install: '1'
  settings:
    xdebug.default_enable: '1'
    xdebug.remote_autostart: '0'
    xdebug.remote_connect_back: '1'
    xdebug.remote_enable: '1'
    xdebug.remote_handler: dbgp
    xdebug.remote_port: '9000'
blackfire:
  install: '0'
  settings:
    server_id: ''
    server_token: ''
    agent:
      http_proxy: ''
      https_proxy: ''
      log_file: stderr
      log_level: '1'
    php:
      agent_timeout: '0.25'
      log_file: ''
      log_level: '1'
xhprof:
  install: '0'
wpcli:
  install: '0'
  version: v0.19.0
drush:
  install: '0'
  version: 6.3.0
ruby:
  install: '1'
  versions: { }
python:
  install: '1'
  packages: { }
  versions: { }
nodejs:
  install: '0'
  npm_packages: { }
hhvm:
  install: '0'
  nightly: 0
  composer: '1'
  composer_home: ''
  settings: { }
  server_ini:
    hhvm.server.host: 127.0.0.1
    hhvm.server.port: '9000'
    hhvm.log.use_log_file: '1'
    hhvm.log.file: /var/log/hhvm/error.log
  php_ini:
    display_errors: 'On'
    error_reporting: '-1'
    date.timezone: UTC
mysql:
  install: '1'
  settings:
    version: '5.6'
    root_password: christina
    override_options: { }
  adminer: 0
  users: { }
  databases:
    mysqlnd_dl3m722heu31:
      name: blogdata
      sql: ''
  grants: { }
mariadb:
  install: '0'
  settings:
    version: '10.0'
    root_password: '123'
    override_options: { }
  adminer: 0
  users:
    mariadbnu_17honh69lm86:
      name: dbuser
      password: '123'
  databases:
    mariadbnd_l1rsvnj0ghw1:
      name: dbname
      sql: ''
  grants:
    mariadbng_y345jmggx662:
      user: dbuser
      table: '*.*'
      privileges:
        - ALL
postgresql:
  install: '0'
  settings:
    global:
      encoding: UTF8
      version: '9.3'
    server:
      postgres_password: '123'
    databases: { }
    users: { }
    grants: { }
  adminer: 0
mongodb:
  install: '0'
  settings:
    auth: 1
    bind_ip: 127.0.0.1
    port: '27017'
  databases: { }
redis:
  install: '0'
  settings:
    conf_port: '6379'
sqlite:
  install: '0'
  adminer: 0
  databases: { }
mailcatcher:
  install: '0'
  settings:
    smtp_ip: 0.0.0.0
    smtp_port: 1025
    http_ip: 0.0.0.0
    http_port: '1080'
    mailcatcher_path: /usr/local/rvm/wrappers/default
    from_email_method: inline
beanstalkd:
  install: '0'
  settings:
    listenaddress: 0.0.0.0
    listenport: '11300'
    maxjobsize: '65535'
    maxconnections: '1024'
    binlogdir: /var/lib/beanstalkd/binlog
    binlogfsync: null
    binlogsize: '10485760'
  beanstalk_console: 0
rabbitmq:
  install: '0'
  settings:
    port: '5672'
  users: { }
  vhosts: { }
  plugins: { }
elastic_search:
  install: '0'
  settings:
    version: 1.4.1
    java_install: true
solr:
  install: '0'
  settings:
    version: 4.10.2
    port: '8984'
During vagrant up I get a lot of red errors, and at the end, this:
The SSH command responded with a non-zero exit status. Vagrant assumes
that this means the command failed. The output for this command should
be in the log above. Please read the output to determine what went
wrong.
But if I browse to the box's IP, http://192.168.56.101/, I get the Apache default page:
Apache2 Ubuntu Default Page, [...]
You should replace this file (located at /var/www/html/index.html)
before continuing to operate your HTTP server.
That file lives in Puphpet's html folder, but for Zend the vhost has to point to the public/index.php front controller.
Here is the working tree of my ZF project:

Don't use the box's IP address. Use vhosts and add an entry into your computer's hosts file, then access that URL in your browser.
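For example, with the config above (Apache vhost local.blog with alias www.local.blog, private network IP 192.168.56.101), a sketch of the entry to add to /etc/hosts on the Mac host would look something like this:

# /etc/hosts on the host machine (values taken from the config.yaml above)
192.168.56.101   local.blog www.local.blog

After that, opening http://local.blog/ in the browser hits the Apache vhost whose docroot is /var/www/blog/public, so Zend's public/index.php is served instead of the default /var/www/html/index.html page.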

Related

502 Bad Gateway Nginx reverse proxy, connect() failed (111: Connection refused) while connecting to upstream

I have a project which consists of a Go application + Nginx + a database (Postgres). All of them are built as Docker containers.
This is my docker-compose.yml file:
version: "3"
services:
db:
image: postgres:10
environment:
- POSTGRES_PASSWORD=DatabasePassword
- POSTGRES_USER=egor
- POSTGRES_DB=postgres
expose:
- 5432
backend:
build: .
environment:
- POSTGRES_URL=postgres://egor:DatabasePassword#db:5432/postgres?sslmode=disable
- LISTEN_ADDRESS=:5432
depends_on:
- db
proxy:
image: nginx
volumes:
- type: bind
source: ./nginx.conf
target: /etc/nginx/nginx.conf
ports:
- 80:80
depends_on:
- backend
- db
This is my Go application:
package main

import (
    "database/sql"
    "fmt"
    "time"

    _ "github.com/lib/pq"

    "log"
    "net/http"

    "github.com/caarlos0/env"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

type config struct {
    PostgresUri   string `env:"POSTGRES_URL" envDefault:"postgres://root:pass@localhost:5432/postgres?sslmode=disable"`
    ListenAddress string `env:"LISTEN_ADDRESS" envDefault:":7000"`
    //PostgresHost string `env:"POSTGRES_HOST" envDefault:":l"`
    //PostgresUser string `env:"POSTGRES_USER" envDefault:":root"`
    //PostgresPassword string `env:"POSTGRES_PASSWD" envDefault:":qwerty"`
    //PostgresName string `env:"POSTGRES_NAME" envDefault:":postgres"`
}

var (
    db          *sql.DB
    errorsCount = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "gocalc_errors_count",
            Help: "Gocalc Errors Count Per Type",
        },
        []string{"type"},
    )
    requestsCount = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "gocalc_requests_count",
            Help: "Gocalc Requests Count",
        })
)

func main() {
    var err error

    // Initing prometheus
    prometheus.MustRegister(errorsCount)
    prometheus.MustRegister(requestsCount)

    // Getting env
    cfg := config{}
    if err = env.Parse(&cfg); err != nil {
        fmt.Printf("%+v\n", err)
    }

    time.Sleep(time.Second)
    fmt.Println("Sleep over!")

    // Connecting to database
    //psqlInfo := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=5432 sslmode=disable",
    //    cfg.PostgresHost,cfg.ListenAddress,cfg.PostgresUser,cfg.PostgresPassword,cfg.PostgresName)
    //db, err := sql.Open("postgres", "host=db user=egor password=DatabasePassword dbname=postgres port=5432 sslmode=disable")
    db, err = sql.Open("postgres", cfg.PostgresUri)
    if err != nil {
        log.Fatalf("Can't connect to postgresql: %v", err)
    }
    defer db.Close()

    err = db.Ping()
    if err != nil {
        log.Fatalf("Can't ping database: %v", err)
    }

    http.HandleFunc("/", handler)
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(cfg.ListenAddress, nil))
}

func handler(w http.ResponseWriter, r *http.Request) {
    requestsCount.Inc()
    keys, ok := r.URL.Query()["q"]
    if !ok || len(keys[0]) < 1 {
        errorsCount.WithLabelValues("missing").Inc()
        log.Println("Url Param 'q' is missing")
        http.Error(w, "Bad Request", 400)
        return
    }
    q := keys[0]
    log.Println("Got query: ", q)

    var result string
    sqlStatement := fmt.Sprintf("SELECT (%s)::numeric", q)
    row := db.QueryRow(sqlStatement)
    err := row.Scan(&result)
    if err != nil {
        log.Println("Error from db: %s", err)
        errorsCount.WithLabelValues("db").Inc()
        http.Error(w, "Internal Server Error", 500)
        return
    }
    fmt.Fprintf(w, "query %s; result %s", q, result)
}
And my nginx configuration:
events {
    worker_connections 1024;
}

http {
    server {
        listen 80;
        server_name localhost;

        location / {
            proxy_pass http://backend:7000;
        }
    }
}
But when I open the page in a browser, I get the error page: 502 Bad Gateway (nginx).
This is my log:
2022/11/08 23:41:24 [error] 29#29: *1 connect() failed (111: Connection refused) while connecting to upstream, client: xxx.xx.x.x, server: localhost, request: "GET / HTTP/1.1", upstream: "http://xxx.xx.x.x:7000/", host: "0.0.0.0"
What is the problem? All services start correctly; only the nginx reverse proxy reports an error.
I just put together a small project that represents your scenario. This is the repository structure:
webapp/
  nginx/
    Dockerfile
    nginx.conf
  web/
    Dockerfile
    main.go
  docker-compose.yaml
The contents of each file are as follows.
nginx/nginx.conf
events {}

http {
    server {
        listen 80;

        location / {
            proxy_pass http://backend:7000;
        }
    }
}
This is more or less the same as your file.
nginx/Dockerfile
FROM nginx
EXPOSE 80
COPY nginx.conf /etc/nginx/nginx.conf
Here, we specify the instructions to build the nginx container. We expose only port 80.
web/main.go
package main

import (
    "fmt"
    "net/http"
)

func main() {
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintf(w, "Hello, World!\n")
    })

    http.ListenAndServe(":7000", nil)
}
Simple HTTP server with a hard-coded reply. This HTTP server listens for requests on port 7000.
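As a quick sanity check (assuming Go is installed locally), you can run this server on its own and hit it with curl before involving Docker at all:

# from the web/ folder: run the HTTP server directly
go run main.go

# in another terminal: should print "Hello, World!"
curl http://localhost:7000/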
web/Dockerfile
FROM golang:1.12.7-alpine3.10 AS build
WORKDIR /go/src/app
COPY ./main.go ./main.go
RUN go build -o ./bin/gowebserver ./main.go
FROM alpine:latest
COPY --from=build /go/src/app/bin /go/bin
EXPOSE 7000
ENTRYPOINT go/bin/gowebserver
Here, we use a multi-stage build. In the first stage we build the HTTP server, while in the second one we copy the executable onto a leaner base image. We expose port 7000 of this container.
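If you want to see the effect of the multi-stage build, you can build just this image and inspect its size (the tag gowebserver is only an example name, run from the root folder of the project):

# build only the web image and check its size
docker build -t gowebserver ./web
docker images gowebserver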
docker-compose.yaml
version: "3"
services:
backend:
build: "./web"
expose:
- "7000"
nginx:
build: "./nginx"
ports:
- "80:80"
depends_on:
- "backend"
Here is the last part that connects everything. We expose only port 80 to the outside. Internally, the backend service exposes port 7000 so it can be reached by the nginx service.
To spin everything up, you have to run these two commands (in the root folder of the project):
docker-compose build
docker-compose up
To test this solution you have to use your internal IP address (in my case it was something like 192.168.1.193) and navigate to http://192.168.1.193/, which should give you a Hello, World! message.
Let me know if this solves your issue!

Go's go-colly library not returning any data when deployed on heroku

I built a web scraper using Go's go-colly library and it works pretty well on my local machine, but it doesn't return any data when deployed on Heroku. What could be the issue? I deployed it as a Docker container on Heroku. I am quite new to Go, btw :)
The Scraper:
type Song struct {
    Title    string
    Subtitle string
    Link     string
}

var wg sync.WaitGroup
var mu sync.Mutex

func getTune(query string, allSongs *[]Song, wg *sync.WaitGroup) {
    c := colly.NewCollector(
        colly.AllowedDomains("https://get-tune.cc", "get-tune.cc"),
    )
    c.WithTransport(&http.Transport{
        Proxy: http.ProxyFromEnvironment,
        DialContext: (&net.Dialer{
            Timeout:   30 * time.Second,
            KeepAlive: 30 * time.Second,
            DualStack: true,
        }).DialContext,
        IdleConnTimeout:       120 * time.Second,
        TLSHandshakeTimeout:   20 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
    })
    runtime.LockOSThread()
    mu.Lock()
    defer mu.Unlock()
    defer wg.Done()
    c.OnHTML(".playlist li", func(element *colly.HTMLElement) {
        link := element.Attr("data-mp3")
        songs := element.DOM
        song := songs.Find(".playlist-name").Find("b").Text()
        em := songs.Find(".playlist-name").Find("em").Text()
        if len(em) > 5 && !strings.Contains(strings.ToLower(em), "remix") && !strings.Contains(strings.ToLower(em), "mix") && !strings.Contains(strings.ToLower(em), "edit") && !strings.Contains(strings.ToLower(song), "mix") && !strings.Contains(strings.ToLower(song), "edit") && !strings.Contains(strings.ToLower(song), "remix") {
            *allSongs = append(*allSongs, Song{
                Title:    song,
                Subtitle: em,
                Link:     link,
            })
        }
    })
    err := c.Visit(fmt.Sprintf("https://get-tune.cc/search/f/%s/", strings.Join(strings.Split(query, " "), "+")))
    if err != nil {
        fmt.Printf("Error: %v", err)
    }
}

func Crawler(query string) []Song {
    songs := []Song{}
    wg.Add(1)
    go getTune(query, &songs, &wg)
    wg.Wait()
    return songs
}
Dockerfile
FROM golang:1.19.2-alpine3.15 AS builder
WORKDIR /app
COPY go.mod ./
COPY go.sum ./
RUN go mod download
COPY . .
RUN go build -o main .
EXPOSE 4000
CMD ["./main"]
docker-compose.yml
version: '3.9'
services:
  redis:
    image: 'bitnami/redis:latest'
    command: redis-server --requirepass password
    ports:
      - 6379:6379
    volumes:
      - $PWD/redis-data:/var/lib/redis
      - $PWD/redis.conf:/usr/local/etc/redis/redis.conf
    environment:
      - REDIS_REPLICATION_MODEL=master
      - ALLOW_EMPTY_PASSWORD=yes
  app:
    build: .
    command: go run main.go
    volumes:
      - .:/app
    ports:
      - 4000:4000
    depends_on:
      - redis
heroku.yml
build:
  docker:
    web: Dockerfile
run:
  web: ./main

How to connect go grpc server with dart grpc client using Envoy and Grpc_web

I'm new to grpc_web and Envoy.
Please help me to set up the following:
The gRPC Go server is running on an EC2 instance as a Docker container.
The Dart web client is running on my local PC.
I need to make a gRPC call from the Dart web app to the gRPC Go server.
I used an Envoy proxy to forward the requests. The Envoy proxy is running as a container on the same EC2 instance.
I'm getting the following error: "Response: null, trailers: {access-control-allow-credentials: true, access-control-allow-origin: http://127.0.0.1:9000, vary: Origin})".
Grpc_Go:
package main

import (
    "context"
    "flag"
    "fmt"
    "log"
    "net"

    "google.golang.org/grpc"
    pb "google.golang.org/grpc/examples/helloworld/helloworld"
)

var (
    port = flag.Int("port", 50051, "The server port")
)

// server is used to implement helloworld.GreeterServer.
type server struct {
    pb.UnimplementedGreeterServer
}

// SayHello implements helloworld.GreeterServer
func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
    log.Printf("Received: %v", in.GetName())
    return &pb.HelloReply{Message: "Hello " + in.GetName()}, nil
}

func (s *server) SayHelloAgain(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
    return &pb.HelloReply{Message: "Hello again " + in.GetName()}, nil
}

func main() {
    flag.Parse()
    lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
    if err != nil {
        log.Fatalf("failed to listen: %v", err)
    }
    s := grpc.NewServer()
    pb.RegisterGreeterServer(s, &server{})
    log.Printf("server listening at %v", lis.Addr())
    if err := s.Serve(lis); err != nil {
        log.Fatalf("failed to serve: %v", err)
    }
}
GRPC_dart_client:
import 'package:grpc/grpc_web.dart';
import 'package:grpc_web/app.dart';
import 'package:grpc_web/src/generated/echo.pbgrpc.dart';

void main() {
  final channel = GrpcWebClientChannel.xhr(Uri.parse('http://ec2-ip:8080'));
  final service = EchoServiceClient(channel);
  final app = EchoApp(service);

  final button = querySelector('#send') as ButtonElement;
  button.onClick.listen((e) async {
    final msg = querySelector('#msg') as TextInputElement;
    final value = msg.value!.trim();
    msg.value = '';

    if (value.isEmpty) return;

    if (value.indexOf(' ') > 0) {
      final countStr = value.substring(0, value.indexOf(' '));
      final count = int.tryParse(countStr);
      if (count != null) {
        app.repeatEcho(value.substring(value.indexOf(' ') + 1), count);
      } else {
        app.echo(value);
      }
    } else {
      app.echo(value);
    }
  });
}
envoy.yaml:
admin:
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }
static_resources:
  listeners:
    - name: listener_0
      address:
        socket_address: { address: 0.0.0.0, port_value: 8080 }
      filter_chains:
        - filters:
            - name: envoy.filters.network.http_connection_manager
              typed_config:
                "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                codec_type: auto
                stat_prefix: ingress_http
                route_config:
                  name: local_route
                  virtual_hosts:
                    - name: local_service
                      domains: ["*"]
                      routes:
                        - match: { prefix: "/" }
                          route:
                            cluster: echo_service
                            timeout: 0s
                            max_stream_duration:
                              grpc_timeout_header_max: 0s
                      cors:
                        allow_origin_string_match:
                          - prefix: "*"
                        allow_methods: GET, PUT, DELETE, POST, OPTIONS
                        allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,custom-header-1,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout
                        max_age: "1728000"
                        expose_headers: custom-header-1,grpc-status,grpc-message
                http_filters:
                  - name: envoy.filters.http.grpc_web
                  - name: envoy.filters.http.cors
                  - name: envoy.filters.http.router
  clusters:
    - name: echo_service
      connect_timeout: 0.25s
      type: logical_dns
      http2_protocol_options: {}
      lb_policy: round_robin
      load_assignment:
        cluster_name: cluster_0
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: app
                      port_value: 50051
Grpc_go_docker_file:
# Install git.
# Git is required for fetching the dependencies.
RUN apk update && apk add --no-cache git
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o main .
# Start a new stage from scratch
FROM alpine:latest
RUN apk --no-cache add ca-certificates
WORKDIR /root/
# Copy the Pre-built binary file from the previous stage. Observe we also copied the .env file
COPY --from=builder /app/main .
# Expose port 50051 to the outside world
EXPOSE 50051
CMD ["./main"]
Envoy_Docker:
COPY envoy.yaml /etc/envoy/envoy.yaml
CMD /usr/local/bin/envoy -c /etc/envoy/envoy.yaml -l trace --log-path /tmp/envoy_info.log
I've been stuck on this for more than two days, please help me. Thanks in advance.
Thank you all for your replies.
I fixed this issue by using the IP of the EC2 instance.
clusters:
  - name: echo_service
    connect_timeout: 0.25s
    type: logical_dns
    http2_protocol_options: {}
    lb_policy: round_robin
    load_assignment:
      cluster_name: cluster_0
      endpoints:
        - lb_endpoints:
            - endpoint:
                address:
                  socket_address:
                    address: app
                    port_value: 50051
Instead of the container address (address: app, where app is the container name) in envoy.yaml, I used the IP of the EC2 instance together with the container port; now Envoy forwards the request to the server.
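For reference, a sketch of the changed part of the cluster config; the <ec2-instance-ip> value below is just a placeholder for the instance's actual IP:

      endpoints:
        - lb_endpoints:
            - endpoint:
                address:
                  socket_address:
                    address: <ec2-instance-ip>   # placeholder: EC2 instance IP instead of the container name
                    port_value: 50051            # the gRPC server's published container port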

Executing WDIO-Test locally in a docker container throws error: @wdio/cli:utils: A service failed in the 'onPrepare' hook

I'm executing a headless e2e test locally in a Docker container like so:
docker-compose up
yarn test
I get this error message at the beginning:
ERROR @wdio/cli:utils: A service failed in the 'onPrepare' hook
TypeError: Cannot read property 'args' of undefined
    at DockerLauncher.onPrepare (C:\myProgs\myWDIOTest\node_modules\wdio-docker-service\lib\launcher.js:30:9)
    at C:\myWDIOTest\myWDIOTest\node_modules\@wdio\cli\build\utils.js:24:40
    at Array.map (<anonymous>)
    at Object.runServiceHook (C:\myProgs\myWDIOTest\node_modules\@wdio\cli\build\utils.js:21:33)
    at Launcher.run (C:\myProgs\myWDIOTest\node_modules\@wdio\cli\build\launcher.js:61:27)
    at processTicksAndRejections (internal/process/task_queues.js:95:5)
I'm not using the onPrepare hook in the wdio configuration file (see below).
The test carries on and actually finishes successfully every time, just like it's supposed to. At this point simply suppressing this error message would be a viable solution for me (since the error doesn't compromise the test results).
There is a solution here for tests run on Sauce Labs, but that won't work for me. It does make me wonder whether I have to look for the solution in my docker-compose file:
version: "3"
services:
chrome:
image: selenium/node-chrome:4.0.0-rc-1-prerelease-20210713
shm_size: 2gb
depends_on:
- selenium-hub
environment:
- SE_EVENT_BUS_HOST=selenium-hub
- SE_EVENT_BUS_PUBLISH_PORT=4442
- SE_EVENT_BUS_SUBSCRIBE_PORT=4443
ports:
- "6900:5900"
selenium-hub:
image: selenium/hub:4.0.0-rc-1-prerelease-20210713
container_name: selenium-hub
ports:
- "4442:4442"
- "4443:4443"
- "4444:4444"
These are the contents of my wdio configuration file:
import BrowserOptions from "./browserOpts";
import CucumberOptions from "./cucumberOpts";

const fs = require('fs');
const wdioParallel = require('wdio-cucumber-parallel-execution');
const reporter = require('cucumber-html-reporter');

const currentTime = new Date().toJSON().replace(/:/g, "-");
const jsonTmpDirectory = `reports/json/tmp/`;

let featureFilePath = `featureFiles/*.feature`;
let timeout = 30000;

exports.config = {
    hostname: 'localhost',
    port: 4444,
    sync: true,
    specs: [
        featureFilePath
    ],
    maxInstances: 1,
    capabilities: [{
        maxInstances: 1,
        browserName: "chrome",
        'goog:chromeOptions': BrowserOptions.getChromeOpts(),
    }],
    logLevel: 'error',
    bail: 0,
    baseUrl: 'http://localhost',
    waitforTimeout: timeout,
    connectionRetryTimeout: timeout * 3,
    connectionRetryCount: 3,
    services: ['docker'],
    framework: 'cucumber',
    reporters: [
        ['cucumberjs-json', {
            jsonFolder: jsonTmpDirectory,
            language: 'de'
        }]
    ],
    cucumberOpts: CucumberOptions.getDefaultSettings(),
    before: function (capabilities, specs) {
        browser._setWindowSize(1024, 768)
    },
    beforeSuite: function (suite) {
        console.log(`Suite "${suite.fullTitle}" from file "${suite.file}" starts`);
    },
    beforeTest: function (test) {
        console.log(`Test "${test.title}" starts`);
    },
    afterTest: function (test) {
        console.log(`Test "${test.title}" finished`);
    },
    onComplete: () => {
        console.log('<<< E2E-TEST COMPLETED >>>\n\n');
        try {
            let consolidatedJsonArray = wdioParallel.getConsolidatedData({
                parallelExecutionReportDirectory: jsonTmpDirectory
            });
            let jsonFile = `${jsonTmpDirectory}report.json`;
            fs.writeFileSync(jsonFile, JSON.stringify(consolidatedJsonArray));
            let options = {
                theme: 'bootstrap',
                jsonFile: jsonFile,
                output: `reports/html/report-${currentTime}.html`,
                reportSuiteAsScenarios: true,
                scenarioTimestamp: true,
                launchReport: true,
                ignoreBadJsonFile: true
            };
            reporter.generate(options);
        } catch (err) {
            console.log('err', err);
        }
    }
};
Your docker-compose.yml should work with WebdriverIO if you remove services: ['docker'] from wdio.conf.js.
Based on a comment in this video, services: ['docker'] is only needed if you want wdio to instantiate its own containers.
To get rid of this error message, specify a separate configuration file for wdio docker and leave services as an empty array. That worked for me, and wdio still knows it should run the tests against the container. Hope it helps:
services: [],
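A minimal sketch of that split, assuming a hypothetical second config file named wdio.docker.conf.js that is used only when docker-compose already provides the Selenium grid (the file name is just an example):

// wdio.docker.conf.js (example name) - for runs where docker-compose starts the grid
// Same options as wdio.conf.js, except the wdio-docker-service is not loaded,
// so its onPrepare hook can no longer fail.
exports.config = {
    // ...copy the other options you need from wdio.conf.js...
    hostname: 'localhost',   // grid published by docker-compose on port 4444
    port: 4444,
    services: [],            // no 'docker' service here
};

You would then point the test run at that file instead of wdio.conf.js; the exact CLI invocation depends on your wdio version.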

Broker not available (loadMetadataForTopics) - kafka-node consumer

I use kafka-node in my Node.js code. I have an API for request/response.
First of all, I make a request to http://localhost:3000/number1. Then I start a consumer which consumes messages from one partition of a Kafka topic ("receive") and I try to find the message with id = number1. After that I want to return a response to the user with this value. So I create a consumer like the following:
options = {
    kafkaHost: 'kafka:9092'
}

const client_node = new kafka_node.KafkaClient(options);
var Consumer = kafka_node.Consumer
var consumer_node = new Consumer(
    client_node,
    [
        { topic: 'receive.kafka.entities', partition: 0, offset: 0 }
    ],
    {
        autoCommit: false,
        fetchMaxWaitMs: 100,
        fromOffset: 'earliest',
        groupId: 'kafka-node-group',
        asyncPush: false,
    }
);

const read = (callback) => {
    let ret = "1"
    consumer_node.on('message', async function (message) {
        var parse1 = JSON.parse(message.value)
        var parse2 = JSON.parse(parse1.payload)
        var id = parse2.fullDocument.id
        var lastOffset = message.highWaterOffset - 1
        //check if there is a query
        if (lastOffset <= message.offset || ret !== "1") {
            return callback(ret)
        }
        else if (id === back2) {
            ret = parse2.fullDocument
        }
    });
}

let error = {
    id: "The entity " + back2 + " not found "
}

read((data) => {
    consumer_node.close(true, function (message) {
        if (data != "1") {
            res.status(200).send(data)
        }
        else {
            res.status(404).send(error)
        }
    })
})
If I make continuous requests, after the first request I get this response:
{
    "message": "Broker not available (loadMetadataForTopics)"
}
My docker-compose file 1 is the following:
zookeeper:
  image: confluentinc/cp-zookeeper:5.4.1
  container_name: stellio-zookeeper
  ports:
    - 2181:2181
  environment:
    ZOOKEEPER_SERVER_ID: 1
    ZOOKEEPER_CLIENT_PORT: 2181
    ZOOKEEPER_TICK_TIME: 2000
  networks:
    - default
    - localnet
kafka:
  image: confluentinc/cp-enterprise-kafka:latest
  container_name: kafka
  ports:
    - 9092:9092
    - 9101:9101
  environment:
    KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9092,EXTERNAL://localhost:29092
    KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
    KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
    KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
    KAFKA_BROKER_ID: 1
    KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
    KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
    KAFKA_JMX_PORT: 9101
    KAFKA_JMX_HOSTNAME: localhost
    CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:9092
    CONFLUENT_METRICS_ENABLE: 'true'
    CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
    KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
  depends_on:
    - zookeeper
  networks:
    - default
    - localnet
    - my-proxy-net-kafka
networks:
  default: # this network (app2)
    driver: bridge
  my-proxy-net-kafka:
    external:
      name: kafka_network
Docker-compose file2
app:
  container_name: docker-node
  hostname: docker-node
  restart: always
  build: .
  command: nodemon /usr/src/app/index.js
  networks:
    - default
    - proxynet-kafka
  ports:
    - '3000:3000'
  volumes:
    - .:/usr/src/app
networks:
  default:
    driver: bridge
  proxynet-kafka:
    name: kafka_network
Why does that happen? Can you help me fix this?
[If you want more information, feel free to ask me :)]
