I am deploying a microservices app to Docker Swarm. I've already written docker-compose-prod.yml, and it works fine when deployed with plain docker compose up. But when I run docker stack deploy --compose-file docker-compose-prod.yml app, the services that need to connect to the mongodb service fail because they don't see the shared network.
docker-compose-prod.yml:
version: "3.9"
services:
redis:
build: redis/
image: someimage
restart: always
networks:
- redis
mongodb:
image: mongo:4.4.7-focal
restart: always
env_file:
- mongo/.env
volumes:
- mongodata:/data/db
networks:
- mongo
token_service:
build: token_service/
image: someimage
command: "-cfg ./config/config_prod.yml"
networks:
- backend
user_service:
build: user_service/
image: someimage
command: "-db_address ${MONGODB_ADDRESS:-mongodb://mongodb:${MONGODB_PORT:-27017}} -port ${USER_SERVICE_PORT:-9385}"
networks:
- backend
- mongo
depends_on:
- mongodb
music_service:
build: music_service/
image: someimage
command:
[
"-user_service_address",
"user_service:${USER_SERVICE_PORT:-9385}",
"-port",
"${MUSIC_SERVICE_PORT:-7070}",
"-db_address",
"${MONGODB_ADDRESS:-27017}"
]
depends_on:
- mongodb
networks:
- mongo
- backend
photo_service:
build: photo_service/
image: someimage
command:
[
"-music_service_address",
"music_service:${MUSIC_SERVICE_PORT:-7070}",
"-user_service_address",
"user_service:${USER_SERVICE_PORT:-9385}",
"-port",
"${PHOTO_SERVICE_PORT:-7345}"
]
depends_on:
- music_service
- user_service
- mongodb
networks:
- backend
player_service:
build: player_service/
image: someimage
command:
[
"-msaddress",
"music_service:${MUSIC_SERVICE_PORT:-7070}",
"-rport",
"${REDIS_PORT:-6379}",
"-rhost",
"redis",
"-port",
"${PLAYER_SERVICE_PORT:-6060}"
]
environment:
- GIN_MODE=release
depends_on:
- redis
networks:
- backend
- redis
playlist_service:
build: playlist_service/
image: someimage
command:
[
"-msaddress",
"music_service:${MUSIC_SERVICE_PORT:-7070}",
"-rport",
"${REDIS_PORT:-6379}",
"-rhost",
"redis",
"-port",
"${PLAYLIST_SERVICE_PORT:-6061}",
"-usaddress",
"user_service:${USER_SERVICE_PORT:-9385}"
]
depends_on:
- redis
- music_service
- user_service
networks:
- backend
- redis
networks:
backend:
mongo:
redis:
volumes:
mongodata:
redisdata:
The someimage values are obviously different for each service. I've pushed the images to Docker Hub, so Docker Swarm can pull them without trouble.
docker stack ps app:
music_service logs:
app_music_service.1.jhuxk5kck54x@sw1 | INFO 2023/02/11 10:51:49 Trying to connect to database
app_music_service.1.jhuxk5kck54x@sw1 | panic: runtime error: invalid memory address or nil pointer dereference
app_music_service.1.jhuxk5kck54x@sw1 | [signal SIGSEGV: segmentation violation code=0x1 addr=0x40 pc=0x86baad]
app_music_service.1.jhuxk5kck54x@sw1 |
app_music_service.1.jhuxk5kck54x@sw1 | goroutine 1 [running]:
app_music_service.1.jhuxk5kck54x@sw1 | go.mongodb.org/mongo-driver/mongo.newDatabase(0x0, {0xb49e74, 0xf}, {0x0?, 0x37?, 0x37?})
app_music_service.1.jhuxk5kck54x@sw1 | /go/pkg/mod/go.mongodb.org/mongo-driver@v1.11.1/mongo/database.go:47 +0xcd
app_music_service.1.jhuxk5kck54x@sw1 | go.mongodb.org/mongo-driver/mongo.(*Client).Database(...)
app_music_service.1.jhuxk5kck54x@sw1 | /go/pkg/mod/go.mongodb.org/mongo-driver@v1.11.1/mongo/client.go:627
app_music_service.1.jhuxk5kck54x@sw1 | music_service/funcs/db.setupConfigVariables()
app_music_service.1.jhuxk5kck54x@sw1 | /go/src/music_service/funcs/db/db_connection.go:26 +0x11b
app_music_service.1.jhuxk5kck54x@sw1 | music_service/funcs/db.connectToDb()
app_music_service.1.jhuxk5kck54x@sw1 | /go/src/music_service/funcs/db/db_connection.go:18 +0x19
app_music_service.1.jhuxk5kck54x@sw1 | music_service/funcs/db.ConnectBD()
app_music_service.1.jhuxk5kck54x@sw1 | /go/src/music_service/funcs/db/db_connection.go:13 +0x54
app_music_service.1.jhuxk5kck54x@sw1 | main.main()
app_music_service.1.jhuxk5kck54x@sw1 | /go/src/music_service/main.go:11 +0x71
(Tasks la9olwptv2yi and n48kwxkz27be, started as the service restarted at 10:52:00 and 10:52:21, panic with exactly the same trace.)
user_service logs:
app_user_service.1.rb076726mnl6@sw1 | INFO 2023/02/11 10:53:31 Trying to connect to database at mongodb://mongodb:${MONGODB_PORT:-27017}
app_user_service.1.rb076726mnl6@sw1 | panic: runtime error: invalid memory address or nil pointer dereference
app_user_service.1.rb076726mnl6@sw1 | [signal SIGSEGV: segmentation violation code=0x1 addr=0x2d8 pc=0x7c394d]
app_user_service.1.rb076726mnl6@sw1 |
app_user_service.1.rb076726mnl6@sw1 | goroutine 1 [running]:
app_user_service.1.rb076726mnl6@sw1 | go.mongodb.org/mongo-driver/mongo.newDatabase(0x0, {0xab02b9, 0xf}, {0x0?, 0x63?, 0x70?})
app_user_service.1.rb076726mnl6@sw1 | /go/pkg/mod/go.mongodb.org/mongo-driver@v1.3.1/mongo/database.go:46 +0xcd
app_user_service.1.rb076726mnl6@sw1 | go.mongodb.org/mongo-driver/mongo.(*Client).Database(...)
app_user_service.1.rb076726mnl6@sw1 | /go/pkg/mod/go.mongodb.org/mongo-driver@v1.3.1/mongo/client.go:655
app_user_service.1.rb076726mnl6@sw1 | user_service/funcs/db.setupConfigVariables()
app_user_service.1.rb076726mnl6@sw1 | /go/src/user_service/funcs/db/db_funcs.go:27 +0x10c
app_user_service.1.rb076726mnl6@sw1 | user_service/funcs/db.connectToDb()
app_user_service.1.rb076726mnl6@sw1 | /go/src/user_service/funcs/db/db_funcs.go:18 +0x6c
app_user_service.1.rb076726mnl6@sw1 | user_service/funcs/db.ConnectBD(...)
app_user_service.1.rb076726mnl6@sw1 | /go/src/user_service/funcs/db/db_funcs.go:13
app_user_service.1.rb076726mnl6@sw1 | main.main()
app_user_service.1.rb076726mnl6@sw1 | /go/src/user_service/main.go:11 +0x72
(Task mobmprrfs68e, whose lines were interleaved with the trace above, logs the same message at 10:52:58 and panics identically.)
From the user_service and music_service logs we can see that the problem is the connection to mongodb.
Question
In the user_service logs I noticed that my environment variables are not being substituted the way I expected: the connection string reaches the service literally as mongodb://mongodb:${MONGODB_PORT:-27017}. As with the first question, everything works fine when not deployed to swarm. How do I fix it?
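A commonly suggested workaround (my suggestion, not from the original post): since the literal ${MONGODB_PORT:-27017} reaches the service, docker stack deploy is clearly not performing the ${VAR:-default} interpolation in your setup, so render the variables on the client side with Compose and feed the result to stack deploy from stdin:
# Expand variables (including the :-default values), then deploy the rendered file.
docker compose -f docker-compose-prod.yml config | docker stack deploy --compose-file - app
Note that docker compose config may emit fields that stack deploy warns about or ignores, so inspect its output before relying on it.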
Related
I've installed nvidia-container-runtime on my machine (Ubuntu 22.04), and can access the GPU through docker run.
docker run -it --rm --gpus all selenium/node-chrome:3.141.59 nvidia-smi
Mon Oct 24 00:32:32 2022
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 520.61.05 Driver Version: 520.61.05 CUDA Version: 11.8 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 NVIDIA GeForce ... On | 00000000:0A:00.0 Off | N/A |
| 0% 41C P8 44W / 370W | 68MiB / 10240MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
+-----------------------------------------------------------------------------+
However, when running with the following docker-compose.yml, nvidia-smi can't be found, and applications inside the container don't seem to be using the GPU either.
version: "3.8"
services:
nvidia:
image: selenium/node-chrome:3.141.59
runtime: nvidia
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
command:
["nvidia-smi"]
Running docker-compose up gives:
[+] Running 1/0
⠿ Container docker-compose-gpu-nvidia-1 Recreated 0.0s
Attaching to docker-compose-gpu-nvidia-1
Error response from daemon: failed to create shim: OCI runtime create failed: runc create failed: unable to start container process: exec: "nvidia-smi": executable file not found in $PATH: unknown
If I swap the selenium image to nvidia/cuda, docker-compose can see the GPU. Why is the GPU accessible in docker run but not docker-compose?
Specifying the driver and count fixed this:
version: "3.8"
services:
nvidia:
image: selenium/node-chrome:3.141.59
runtime: nvidia
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
command:
["nvidia-smi"]
I'm not sure why this worked; the docs seem to indicate that omitting these fields will just use all available GPUs.
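One possible explanation (my assumption, not confirmed in this thread): CUDA-based images such as nvidia/cuda set NVIDIA_VISIBLE_DEVICES and NVIDIA_DRIVER_CAPABILITIES, which the NVIDIA container runtime uses to decide what to inject into the container, while generic images like selenium/node-chrome do not. A sketch that sets them explicitly instead of relying on the image:
version: "3.8"
services:
  nvidia:
    image: selenium/node-chrome:3.141.59
    runtime: nvidia
    environment:
      - NVIDIA_VISIBLE_DEVICES=all        # which GPUs the runtime exposes
      - NVIDIA_DRIVER_CAPABILITIES=all    # driver features to mount ('utility' is what provides nvidia-smi)
    command: ["nvidia-smi"]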
I have just created my docker-compose file, trying to run a SonarQube server alongside Postgres and SonarScanner. The SonarQube server and the database can connect; however, my SonarScanner cannot reach the SonarQube server.
This is my docker-compose file:
version: "3"
services:
sonarqube:
image: sonarqube
build: .
expose:
- 9000
ports:
- "127.0.0.1:9000:9000"
networks:
- sonarnet
environment:
- sonar.jdbc.url=jdbc:postgresql://postgres:5432/sonar
- sonar.jdbc.username=sonar
- sonar.jdbc.password=sonar
volumes:
- sonarqube_conf:/opt/sonarqube/conf
- sonarqube_data:/opt/sonarqube/data
- sonarqube_extensions:/opt/sonarqube/extensions
- sonarqube_bundled-plugins:/opt/sonarqube/lib/bundled-plugins
postgres:
image: postgres
build: .
networks:
- sonarnet
ports:
- "5432:5432"
environment:
- POSTGRES_USER=sonar
- POSTGRES_PASSWORD=sonar
volumes:
- postgresql:/var/lib/postgresql
- postgresql_data:/var/lib/postgresql/data
sonarscanner:
image: newtmitch/sonar-scanner
networks:
- sonarnet
depends_on:
- sonarqube
volumes:
- ./:/usr/src
networks:
sonarnet:
volumes:
sonarqube_conf:
sonarqube_data:
sonarqube_extensions:
sonarqube_bundled-plugins:
postgresql:
postgresql_data:
This is my sonar-project.properties file:
# must be unique in a given SonarQube instance
sonar.projectKey=toh-token
# --- optional properties ---
#defaults to project key
#sonar.projectName=toh
# defaults to 'not provided'
#sonar.projectVersion=1.0
# Path is relative to the sonar-project.properties file. Defaults to .
#sonar.sources=$HOME/.solo/angular/toh
# Encoding of the source code. Default is default system encoding
#sonar.sourceEncoding=UTF-8
My sonar-project.properties is located in the same directory as the docker-compose file.
This is what happens whenever I start the services:
Attaching to sonarqube-postgres-1, sonarqube-sonarqube-1, sonarqube-sonarscanner-1
sonarqube-sonarqube-1 | Dropping Privileges
sonarqube-postgres-1 |
sonarqube-postgres-1 | PostgreSQL Database directory appears to contain a database; Skipping initialization
sonarqube-postgres-1 |
sonarqube-postgres-1 | 2022-06-12 20:59:39.522 UTC [1] LOG: starting PostgreSQL 14.3 (Debian 14.3-1.pgdg110+1) on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
sonarqube-postgres-1 | 2022-06-12 20:59:39.523 UTC [1] LOG: listening on IPv4 address "0.0.0.0", port 5432
sonarqube-postgres-1 | 2022-06-12 20:59:39.523 UTC [1] LOG: listening on IPv6 address "::", port 5432
sonarqube-postgres-1 | 2022-06-12 20:59:39.525 UTC [1] LOG: listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432"
sonarqube-postgres-1 | 2022-06-12 20:59:39.533 UTC [26] LOG: database system was shut down at 2022-06-12 20:57:58 UTC
sonarqube-postgres-1 | 2022-06-12 20:59:39.542 UTC [1] LOG: database system is ready to accept connections
sonarqube-sonarscanner-1 | INFO: Scanner configuration file: /usr/lib/sonar-scanner/conf/sonar-scanner.properties
sonarqube-sonarscanner-1 | INFO: Project root configuration file: /usr/src/sonar-project.properties
sonarqube-sonarscanner-1 | INFO: SonarScanner 4.5.0.2216
sonarqube-sonarscanner-1 | INFO: Java 12-ea Oracle Corporation (64-bit)
sonarqube-sonarscanner-1 | INFO: Linux 5.10.117-1-MANJARO amd64
sonarqube-sonarscanner-1 | INFO: User cache: /root/.sonar/cache
sonarqube-sonarqube-1 | 2022.06.12 20:59:40 INFO app[][o.s.a.AppFileSystem] Cleaning or creating temp directory /opt/sonarqube/temp
sonarqube-sonarqube-1 | 2022.06.12 20:59:40 INFO app[][o.s.a.es.EsSettings] Elasticsearch listening on [HTTP: 127.0.0.1:9001, TCP: 127.0.0.1:41087]
sonarqube-sonarscanner-1 | ERROR: SonarQube server [http://sonarqube:9000] can not be reached
sonarqube-sonarscanner-1 | INFO: ------------------------------------------------------------------------
sonarqube-sonarscanner-1 | INFO: EXECUTION FAILURE
sonarqube-sonarscanner-1 | INFO: ------------------------------------------------------------------------
sonarqube-sonarscanner-1 | INFO: Total time: 0.802s
sonarqube-sonarscanner-1 | INFO: Final Memory: 3M/20M
sonarqube-sonarscanner-1 | INFO: ------------------------------------------------------------------------
sonarqube-sonarscanner-1 | ERROR: Error during SonarScanner execution
sonarqube-sonarscanner-1 | org.sonarsource.scanner.api.internal.ScannerException: Unable to execute SonarScanner analysis
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.IsolatedLauncherFactory.lambda$createLauncher$0(IsolatedLauncherFactory.java:85)
sonarqube-sonarscanner-1 | at java.base/java.security.AccessController.doPrivileged(AccessController.java:310)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.IsolatedLauncherFactory.createLauncher(IsolatedLauncherFactory.java:74)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.IsolatedLauncherFactory.createLauncher(IsolatedLauncherFactory.java:70)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.EmbeddedScanner.doStart(EmbeddedScanner.java:185)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.EmbeddedScanner.start(EmbeddedScanner.java:123)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.cli.Main.execute(Main.java:73)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.cli.Main.main(Main.java:61)
sonarqube-sonarscanner-1 | Caused by: java.lang.IllegalStateException: Fail to get bootstrap index from server
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.BootstrapIndexDownloader.getIndex(BootstrapIndexDownloader.java:42)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.JarDownloader.getScannerEngineFiles(JarDownloader.java:58)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.JarDownloader.download(JarDownloader.java:53)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.IsolatedLauncherFactory.lambda$createLauncher$0(IsolatedLauncherFactory.java:76)
sonarqube-sonarscanner-1 | ... 7 more
sonarqube-sonarscanner-1 | Caused by: java.net.ConnectException: Failed to connect to sonarqube/172.30.0.2:9000
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.connection.RealConnection.connectSocket(RealConnection.java:265)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.connection.RealConnection.connect(RealConnection.java:183)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.connection.ExchangeFinder.findConnection(ExchangeFinder.java:224)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.connection.ExchangeFinder.findHealthyConnection(ExchangeFinder.java:108)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.connection.ExchangeFinder.find(ExchangeFinder.java:88)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.connection.Transmitter.newExchange(Transmitter.java:169)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.connection.ConnectInterceptor.intercept(ConnectInterceptor.java:41)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:142)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:117)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.cache.CacheInterceptor.intercept(CacheInterceptor.java:94)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:142)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:117)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.BridgeInterceptor.intercept(BridgeInterceptor.java:93)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:142)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.RetryAndFollowUpInterceptor.intercept(RetryAndFollowUpInterceptor.java:88)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:142)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.http.RealInterceptorChain.proceed(RealInterceptorChain.java:117)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.RealCall.getResponseWithInterceptorChain(RealCall.java:221)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.RealCall.execute(RealCall.java:81)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.ServerConnection.callUrl(ServerConnection.java:114)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.ServerConnection.downloadString(ServerConnection.java:99)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.BootstrapIndexDownloader.getIndex(BootstrapIndexDownloader.java:39)
sonarqube-sonarscanner-1 | ... 10 more
sonarqube-sonarscanner-1 | Caused by: java.net.ConnectException: Connection refused (Connection refused)
sonarqube-sonarscanner-1 | at java.base/java.net.PlainSocketImpl.socketConnect(Native Method)
sonarqube-sonarscanner-1 | at java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:399)
sonarqube-sonarscanner-1 | at java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:242)
sonarqube-sonarscanner-1 | at java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:224)
sonarqube-sonarscanner-1 | at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:403)
sonarqube-sonarscanner-1 | at java.base/java.net.Socket.connect(Socket.java:591)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.platform.Platform.connectSocket(Platform.java:130)
sonarqube-sonarscanner-1 | at org.sonarsource.scanner.api.internal.shaded.okhttp.internal.connection.RealConnection.connectSocket(RealConnection.java:263)
sonarqube-sonarscanner-1 | ... 31 more
sonarqube-sonarscanner-1 | ERROR:
sonarqube-sonarscanner-1 | ERROR: Re-run SonarScanner using the -X switch to enable full debug logging.
Is there something I am doing wrong?
As @Hans Killian said, the issue was the scanner trying to connect to the server before the server was up and running. I fixed it by adding the following to the scanner's service:
command: ["sh", "-c", "sleep 60 && sonar-scanner -Dsonar.projectBaseDir=/usr/src"]
This suspends the scanner until the server is up and running.
I then added the following credentials to the sonar-project.properties file:
sonar.login=admin
sonar.password=admin
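A fixed sleep is fragile. A sketch of a sturdier alternative (my suggestion, assuming a Compose version that supports depends_on conditions, and that curl is available in the sonarqube image; swap in wget otherwise): give the server a healthcheck against its status API and have the scanner wait for it.
services:
  sonarqube:
    # ... as above ...
    healthcheck:
      test: ["CMD-SHELL", "curl -sf http://localhost:9000/api/system/status | grep -q UP"]
      interval: 10s
      retries: 30
  sonarscanner:
    # ... as above ...
    depends_on:
      sonarqube:
        condition: service_healthy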
I want to run a container based on python:3.8.8-slim-buster that needs access to the GPU.
When I build it from this Dockerfile:
FROM python:3.8.8-slim-buster
CMD ["sleep", "infinity"]
and then run it with the --gpus all flag and exec nvidia-smi, I get a proper response:
Sat Jun 19 12:26:57 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 465.27 Driver Version: 465.27 CUDA Version: 11.3 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 NVIDIA GeForce ... Off | 00000000:01:00.0 Off | N/A |
| N/A 45C P8 N/A / N/A | 301MiB / 1878MiB | 14% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
+-----------------------------------------------------------------------------+
and when I use this docker-compose:
services:
test:
image: tensorflow/tensorflow:2.5.0-gpu
command: sleep infinity
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
and exec nvidia-smi after running it, I get the same response.
But when I replace the image in the docker-compose file with python:3.8.8-slim-buster, as in the Dockerfile, I get this response:
OCI runtime exec failed: exec failed: container_linux.go:380: starting container process caused: exec: "nvidia-smi": executable file not found in $PATH: unknown
I appreciate any help figuring this out.
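No resolution is recorded here, but given the answer to the previous question, one thing worth trying (my assumption: tensorflow/tensorflow:2.5.0-gpu ships the NVIDIA_* environment variables the runtime keys on, while python:3.8.8-slim-buster does not) is to set them in the compose file:
services:
  test:
    image: python:3.8.8-slim-buster
    command: sleep infinity
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=utility   # 'utility' is what provides nvidia-smi
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: [gpu]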
I am trying to run the Clara Train example, but when I execute startClaraTrainNoteBooks.sh, the container cannot find the NVIDIA driver.
I already know that the script executes docker-compose.yml, so I tested whether docker-compose can find the NVIDIA driver:
services:
test:
image: nvidia/cuda:10.2-base
command: nvidia-smi
deploy:
resources:
reservations:
devices:
- driver: nvidia
capabilities: [gpu]
device_ids: ['0']
Output:
USER#test:~$ docker-compose up
WARNING: Found orphan containers (hp_nvsmi_1) for this project. If you removed or renamed this service in your compose file, you can run this command with the --remove-orphans flag to clean it up.
Starting hp_test_1 ... done
Attaching to hp_test_1
test_1 | Mon Jun 7 09:01:44 2021
test_1 | +-----------------------------------------------------------------------------+
test_1 | | NVIDIA-SMI 460.27.04 Driver Version: 460.27.04 CUDA Version: 11.2 |
test_1 | |-------------------------------+----------------------+----------------------+
test_1 | | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
test_1 | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
test_1 | | | | MIG M. |
test_1 | |===============================+======================+======================|
test_1 | | 0 GeForce RTX 206... Off | 00000000:01:00.0 Off | N/A |
test_1 | | 0% 34C P8 17W / 215W | 100MiB / 7979MiB | 0% Default |
test_1 | | | | N/A |
test_1 | +-------------------------------+----------------------+----------------------+
test_1 |
test_1 | +-----------------------------------------------------------------------------+
test_1 | | Processes: |
test_1 | | GPU GI CI PID Type Process name GPU Memory |
test_1 | | ID ID Usage |
test_1 | |=============================================================================|
test_1 | +-----------------------------------------------------------------------------+
hp_test_1 exited with code 0
But startClaraTrainNoteBooks.sh cannot find it.
root@claratrain:/claraDevDay# nvidia-smi
root@claratrain:/claraDevDay#
Actually, startDocker.sh can find the driver.
root@c7c2d5597eb8:/claraDevDay# nvidia-smi
Mon Jun 7 09:11:43 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.27.04 Driver Version: 460.27.04 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 GeForce RTX 206... Off | 00000000:01:00.0 Off | N/A |
| 0% 35C P8 17W / 215W | 100MiB / 7979MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
+-----------------------------------------------------------------------------+
root@c7c2d5597eb8:/claraDevDay#
What should I do?
The docker-compose.yml needed to be rewritten like this, and now it works:
# SPDX-License-Identifier: Apache-2.0
version: "3.8"
services:
claratrain:
container_name: claradevday-pt
hostname: claratrain
##### use vanilla clara train docker
#image: nvcr.io/nvidia/clara-train-sdk:v4.0
##### to build image with GPU dashboard inside jupyter lab
build:
context: ./dockerWGPUDashboardPlugin/ # Project root
dockerfile: ./Dockerfile # Relative to context
image: clara-train-nvdashboard:v4.0
depends_on:
- tritonserver
ports:
- "3030:8888" # Jupyter lab port
- "3031:5000" # AIAA port
ipc: host
volumes:
- ${TRAIN_DEV_DAY_ROOT}:/claraDevDay/
- /raid/users/aharouni/data:/data/
command: "jupyter lab /claraDevDay --ip 0.0.0.0 --allow-root --no-browser --config /claraDevDay/scripts/jupyter_notebook_config.py"
# command: tail -f /dev/null
# tty: true
deploy:
resources:
reservations:
devices:
- driver: nvidia
capabilities: [ gpu ]
# To specify certain GPU uncomment line below
#device_ids: ['0,3']
#############################################################
tritonserver:
image: nvcr.io/nvidia/tritonserver:21.02-py3
container_name: aiaa-triton
hostname: tritonserver
restart: unless-stopped
command: >
sh -c "chmod 777 /triton_models &&
/opt/tritonserver/bin/tritonserver \
--model-store /triton_models \
--model-control-mode="poll" \
--repository-poll-secs=5 \
--log-verbose ${TRITON_VERBOSE}"
volumes:
- ${TRAIN_DEV_DAY_ROOT}/AIAA/workspace/triton_models:/triton_models
# shm_size: 1gb
# ulimits:
# memlock: -1
# stack: 67108864
# logging:
# driver: json-file
I need help running a database with Docker and Node.js. I don't understand where I'm going wrong, but I cannot make the connection between my database container and my Node container. This is the database's page on Docker Hub: https://hub.docker.com/_/rethinkdb/. Here is my setup:
My Dockerfile:
FROM node:latest
ENV HOME=/src/jv-agricultor
RUN mkdir -p $HOME/
WORKDIR $HOME/
ADD package* $HOME/
RUN npm install
EXPOSE 80
ADD . $HOME/
CMD ["node", "node_modules/.bin/nodemon", "-L", "bin/www"]
My docker-compose.yml
version: "3"
volumes:
rethindb-data:
external: true
services:
db:
image: rethinkdb:latest
ports:
- "8080:8080"
- "29015:29015"
- "28015:28015"
api:
image: hello-nodemon
environment:
- NODE_ENV=development
- PORT=80
- DB_HOST=localhost
- DB_PORT=28015
deploy:
# replicas: 5
resources:
limits:
cpus: "0.1"
memory: 50M
restart_policy:
condition: on-failure
ports:
- "3000:80"
volumes:
- .:/src/jv-agricultor
- /src/jv-agricultor/node_modules
depends_on:
- db
networks:
- webnet
networks:
webnet:
I run: docker stack deploy -c docker-compose.yml webservice
My docker services:
ID NAME MODE REPLICAS IMAGE PORTS
yez42a7w8khs webservice_api replicated 1/1 hello-nodemon:latest *:3000->80/tcp
n8idu78cp18m webservice_db replicated 1/1 rethinkdb:latest *:8080->8080/tcp,*:28015->28015/tcp,*:29015->29015/tcp
My docker service api (here is node/express)
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
p20qdagcspjc webservice_api.1 hello-nodemon:latest abner Running Running 28 minutes ago
My Docker service db
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
3046xuo4l8ix webservice_db.1 rethinkdb:latest abner Running Running 30 minutes ago
My db service's internal logs:
webservice_db.1.3046xuo4l8ix@abner | Recursively removing directory /data/rethinkdb_data/tmp
webservice_db.1.3046xuo4l8ix@abner | Initializing directory /data/rethinkdb_data
webservice_db.1.3046xuo4l8ix@abner | Running rethinkdb 2.3.6~0jessie (GCC 4.9.2)...
webservice_db.1.3046xuo4l8ix@abner | Running on Linux 4.15.0-24-generic x86_64
webservice_db.1.3046xuo4l8ix@abner | Loading data from directory /data/rethinkdb_data
webservice_db.1.3046xuo4l8ix@abner | Listening for intracluster connections on port 29015
webservice_db.1.3046xuo4l8ix@abner | Listening for client driver connections on port 28015
webservice_db.1.3046xuo4l8ix@abner | Listening for administrative HTTP connections on port 8080
webservice_db.1.3046xuo4l8ix@abner | Listening on cluster addresses: 127.0.0.1, 172.18.0.3, 10.0.5.182, 10.0.5.183, 10.255.11.212, 10.255.11.213
webservice_db.1.3046xuo4l8ix@abner | Listening on driver addresses: 127.0.0.1, 172.18.0.3, 10.0.5.182, 10.0.5.183, 10.255.11.212, 10.255.11.213
webservice_db.1.3046xuo4l8ix@abner | Listening on http addresses: 127.0.0.1, 172.18.0.3, 10.0.5.182, 10.0.5.183, 10.255.11.212, 10.255.11.213
webservice_db.1.3046xuo4l8ix@abner | Server ready, "069fd360acfb_jot" c1cf5173-cf0d-457f-9c8f-4ba1756c28d8
My app.js:
...
var connect = require('./lib/connect');
console.log('DB_HOST: ' + process.env.DB_HOST);
console.log('DB_PORT: ' + process.env.DB_PORT);
console.log('PORT: ' + process.env.PORT);
console.log('NODE_ENV: ' + process.env.NODE_ENV);
...
My connect middleware
'use strict'
// import r from 'rethinkdb';
var r = require('rethinkdb');
module.exports._connect = (function _connect(req, res, next) {
r.connect( {host: process.env.DB_HOST, port: process.env.DB_PORT}, (err, conn) => {
console.log(err);
})
})();
My api service's log response:
webservice_api.1.p20qdagcspjc@abner | [nodemon] restarting due to changes...
webservice_api.1.p20qdagcspjc@abner | [nodemon] starting `node bin/www`
webservice_api.1.p20qdagcspjc@abner | DB_HOST: localhost
webservice_api.1.p20qdagcspjc@abner | DB_PORT: 28015
webservice_api.1.p20qdagcspjc@abner | PORT: 80
webservice_api.1.p20qdagcspjc@abner | NODE_ENV: development
webservice_api.1.p20qdagcspjc@abner | { ReqlDriverError: Could not connect to localhost:28015.
webservice_api.1.p20qdagcspjc@abner | connect ECONNREFUSED 127.0.0.1:28015
webservice_api.1.p20qdagcspjc@abner | at ReqlDriverError.ReqlError [as constructor] (/src/jv-agricultor/node_modules/rethinkdb/errors.js:23:13)
webservice_api.1.p20qdagcspjc@abner | at new ReqlDriverError (/src/jv-agricultor/node_modules/rethinkdb/errors.js:68:50)
webservice_api.1.p20qdagcspjc@abner | at TcpConnection.<anonymous> (/src/jv-agricultor/node_modules/rethinkdb/net.js:94:27)
webservice_api.1.p20qdagcspjc@abner | at Object.onceWrapper (events.js:273:13)
webservice_api.1.p20qdagcspjc@abner | at TcpConnection.emit (events.js:182:13)
webservice_api.1.p20qdagcspjc@abner | at Socket.<anonymous> (/src/jv-agricultor/node_modules/rethinkdb/net.js:705:22)
webservice_api.1.p20qdagcspjc@abner | at Socket.emit (events.js:187:15)
webservice_api.1.p20qdagcspjc@abner | at emitErrorNT (internal/streams/destroy.js:82:8)
webservice_api.1.p20qdagcspjc@abner | at emitErrorAndCloseNT (internal/streams/destroy.js:50:3)
webservice_api.1.p20qdagcspjc@abner | at process._tickCallback (internal/process/next_tick.js:63:19)
webservice_api.1.p20qdagcspjc@abner | From previous event:
webservice_api.1.p20qdagcspjc@abner | at Function.<anonymous> (/src/jv-agricultor/node_modules/rethinkdb/net.js:945:10)
webservice_api.1.p20qdagcspjc@abner | at Function.connect (/src/jv-agricultor/node_modules/rethinkdb/util.js:43:16)
webservice_api.1.p20qdagcspjc@abner | at _connect (/src/jv-agricultor/lib/connect.js:9:7)
webservice_api.1.p20qdagcspjc@abner | at Object.<anonymous> (/src/jv-agricultor/lib/connect.js:19:3)
webservice_api.1.p20qdagcspjc@abner | at Module._compile (internal/modules/cjs/loader.js:689:30)
webservice_api.1.p20qdagcspjc@abner | at Object.Module._extensions..js (internal/modules/cjs/loader.js:700:10)
webservice_api.1.p20qdagcspjc@abner | at Module.load (internal/modules/cjs/loader.js:599:32)
webservice_api.1.p20qdagcspjc@abner | at tryModuleLoad (internal/modules/cjs/loader.js:538:12)
webservice_api.1.p20qdagcspjc@abner | at Function.Module._load (internal/modules/cjs/loader.js:530:3)
webservice_api.1.p20qdagcspjc@abner | at Module.require (internal/modules/cjs/loader.js:637:17)
webservice_api.1.p20qdagcspjc@abner | at require (internal/modules/cjs/helpers.js:20:18)
webservice_api.1.p20qdagcspjc@abner | at Object.<anonymous> (/src/jv-agricultor/app.js:14:15)
webservice_api.1.p20qdagcspjc@abner | at Module._compile (internal/modules/cjs/loader.js:689:30)
webservice_api.1.p20qdagcspjc@abner | at Object.Module._extensions..js (internal/modules/cjs/loader.js:700:10)
webservice_api.1.p20qdagcspjc@abner | at Module.load (internal/modules/cjs/loader.js:599:32)
webservice_api.1.p20qdagcspjc@abner | at tryModuleLoad (internal/modules/cjs/loader.js:538:12)
webservice_api.1.p20qdagcspjc@abner | at Function.Module._load (internal/modules/cjs/loader.js:530:3)
webservice_api.1.p20qdagcspjc@abner | at Module.require (internal/modules/cjs/loader.js:637:17)
webservice_api.1.p20qdagcspjc@abner | at require (internal/modules/cjs/helpers.js:20:18)
webservice_api.1.p20qdagcspjc@abner | name: 'ReqlDriverError',
webservice_api.1.p20qdagcspjc@abner | msg:
webservice_api.1.p20qdagcspjc@abner | 'Could not connect to localhost:28015.\nconnect ECONNREFUSED 127.0.0.1:28015',
webservice_api.1.p20qdagcspjc@abner | frames: undefined,
webservice_api.1.p20qdagcspjc@abner | message:
webservice_api.1.p20qdagcspjc@abner | 'Could not connect to localhost:28015.\nconnect ECONNREFUSED 127.0.0.1:28015' }
docker-compose does inter-service communication by service name, so the value of DB_HOST should be db.
On a side note, unless you need to expose the database outside of the stack, you do not need the port mapping.
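A minimal sketch of that first suggestion, applied to the compose file above (only the changed lines shown):
services:
  api:
    environment:
      - DB_HOST=db        # 'db' resolves through Docker's built-in service DNS on the shared network
      - DB_PORT=28015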
@Alex Karshin It is unnecessary to fully specify the container name. The first example in the docker-compose networking docs shows how simple it really is.
spawnia has a point, but might not have the right answer. If you take a look at docker ps -a, the name of your database container is webservice_db. Therefore, you will not be successful trying to connect to RethinkDB on localhost, because it is obviously not on localhost.
You must either hardcode the container name (webservice_db) in your config file or set it in docker-compose.yml. If you do, I suggest you set the container names explicitly:
version: "3"
...
services:
db:
container_name: webservice_db
...
api:
container_name: webservice_api
environment:
- NODE_ENV=development
- PORT=80
- DB_HOST=webservice_db
- DB_PORT=28015
...
There, now it should work normally.