I get the following error (for each node) when I run docker-compose up. I configured the network parameters and the nodes myself, without using the network bootstrapper.
[ERROR] 08:07:48+0000 [main] internal.NodeStartupLogging.invoke - Exception during node startup: Serialization scheme ([6D696E696D756D], P2P) not supported. [errorCode=1e6peth, moreInformationAt=https://errors.corda.net/OS/4.0/1e6peth]
I have tried changing the properties in the network-parameters file, but without success so far.
Here are my config files:

notary_node.conf:

myLegalName : "O=Notary, L=London, C=GB"
p2pAddress : "localhost:10008"
devMode : true
notary : {
    validating : false
}
rpcSettings = {
    address : "notary:10003"
    adminAddress : "notary:10004"
}
rpcUsers = [
    {
        user = "user"
        password = "test"
        permissions = [
            ALL
        ]
    }
]
detectPublicIp : false
myLegalName : "O=PartyA, L=London, C=GB"
p2pAddress : "localhost:10005"
devMode : true
rpcSettings = {
address : "partya:10003"
adminAddress : "partya:10004"
}
rpcUsers=[
{
user=corda
password=corda_initial_password
permissions=[
ALL
]
}
]
detectPublicIp : false
myLegalName : "O=PartyB, L=London, C=GB"
p2pAddress : "localhost:10006"
devMode : true
rpcSettings = {
address : "partyb:10003"
adminAddress : "partyb:10004"
}
rpcUsers=[
{
user=corda
password=corda_initial_password
permissions=[
ALL
]
}
]
detectPublicIp : false
as well as my network-parameters file and my docker-compose.yml file:
network-parameters:

minimumPlatformVersion=4
notaries=[NotaryInfo(identity=O=Notary, L=London, C=GB, validating=false)]
maxMessageSize=10485760
maxTransactionSize=524288000
whitelistedContractImplementations {
}
eventHorizon="30 days"
epoch=1
docker-compose.yml:

version: '3.7'
services:
  Notary:
    image: corda/corda-zulu-4.0:latest
    container_name: Notary
    networks:
      - corda
    volumes:
      - ./nodes/notary_node.conf:/etc/corda/node.conf
      - ./nodes/network-parameters:/opt/corda/network-parameters
  PartyA:
    image: corda/corda-zulu-4.0:latest
    container_name: PartyA
    networks:
      - corda
    volumes:
      - ./nodes/partya_node.conf:/etc/corda/node.conf
      - ./nodes/network-parameters:/opt/corda/network-parameters
      - ./build/libs/:/opt/corda/cordapps
  PartyB:
    image: corda/corda-zulu-4.0:latest
    container_name: PartyB
    networks:
      - corda
    volumes:
      - ./nodes/partyb_node.conf:/etc/corda/node.conf
      - ./nodes/network-parameters:/opt/corda/network-parameters
      - ./build/libs/:/opt/corda/cordapps
networks:
  corda:
Many thanks in advance for your help!
It looks like it is indeed an issue with the serialization scheme: the network-parameters file is a signed, serialized binary produced by the network bootstrapper, not a plain-text properties file, so a hand-written version cannot be deserialized. (The hex bytes 6D696E696D756D in the error decode to the ASCII string "minimum", i.e. the node is trying to interpret your text file, which begins with minimumPlatformVersion, as a serialization scheme identifier.)
Also, in our most recent Corda 4.4 release, we have released an official image of the containerized Corda node.
Feel free to check out our recent guide on how to start a dockerised node: https://medium.com/corda/containerising-corda-with-corda-docker-image-and-docker-compose-af32d3e8746c
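In practice that means regenerating the file rather than editing it. A minimal sketch, assuming the Corda 4.0 bootstrapper jar and that your three node.conf files sit under ./nodes:

# Run the network bootstrapper against the folder holding the node.conf
# files; it builds each node directory plus a properly serialized
# network-parameters file that the compose volumes above can mount.
java -jar corda-tools-network-bootstrapper-4.0.jar --dir ./nodes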
Related
I have created a docker-compose.yml for Elasticsearch and Kibana as shown below. The docker-compose.yml is working fine, but with no index and no data. I remember that in one of my database-specific docker-compose.yml files I added data as shown below, placing my db sql scripts within the docker/db folder:
services:
  postgres:
    image: postgres:9.6
    volumes:
      - ./docker/db:/docker-entrypoint-initdb.d
    environment:
      POSTGRES_DB: some_db
    ports:
      - 5432:5432
Now the question: similarly to the approach above, how do I specify the ES index and data volume, and what file extension should the ES scripts have?
To be more specific, I want the below ES index and data to be present when Elasticsearch starts:
PUT test
POST _bulk
{ "index" : { "_index" : "test"} }
{ "name" : "A" }
{ "index" : { "_index" : "test"} }
{ "name" : "B" }
{ "index" : { "_index" : "test"} }
{ "name" : "C" }
{ "index" : { "_index" : "test"} }
{ "name" : "D" }
docker-compose.yml
version: '3.7'
services:
  # Elasticsearch Docker Images: https://www.docker.elastic.co/
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.4.0
    container_name: elasticsearch
    environment:
      - xpack.security.enabled=false
      - discovery.type=single-node
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    cap_add:
      - IPC_LOCK
    volumes:
      - elasticsearch-data:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
      - 9300:9300
  kibana:
    container_name: kibana
    image: docker.elastic.co/kibana/kibana:7.4.0
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - 5601:5601
    depends_on:
      - elasticsearch
volumes:
  elasticsearch-data:
    driver: local
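One way to get a similar effect, sketched here as an assumption rather than a tested setup: the Elasticsearch image has no /docker-entrypoint-initdb.d equivalent, so a one-shot curl container can wait for the cluster and replay the requests. This assumes the bulk payload above is saved as ./docker/es/seed.ndjson (a hypothetical path; note that _bulk requires a trailing newline):

  # One-shot seeding container (sketch, not part of the original file).
  # Waits until ES responds, then creates the index and loads the data.
  elasticsearch-seed:
    image: curlimages/curl:latest
    depends_on:
      - elasticsearch
    volumes:
      - ./docker/es:/seed:ro
    entrypoint: /bin/sh
    command: ["-c", "until curl -s http://elasticsearch:9200 >/dev/null; do sleep 2; done; curl -s -X PUT http://elasticsearch:9200/test; curl -s -X POST http://elasticsearch:9200/_bulk -H 'Content-Type: application/x-ndjson' --data-binary @/seed/seed.ndjson"]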
Tell me about Docker:
I have Windows 10 + WSL2 + Docker for Windows, installed Selenoid in Ubuntu, launched it and downloaded the images (Chrome 90, 91, etc.).
The aerokube/selenoid and aerokube/selenoid-ui containers launch successfully, and the tests run against them from IDEA pass without issue.
I want to run tests in 2 versions of Chrome via docker-compose.
Config browsers.json
{
  "chrome": {
    "default": "90.0",
    "versions": {
      "90.0": {
        "env": ["LANG=ru_RU.UTF-8", "LANGUAGE=ru:en", "LC_ALL=ru_RU.UTF-8", "TZ=Europe/Moscow"],
        "image": "selenoid/chrome:90.0",
        "tmpfs": {"/tmp": "size=512m"},
        "hosts": ["x01.aidata.io:127.0.0.1"],
        "port": "4444"
      },
      "91.0": {
        "env": ["LANG=ru_RU.UTF-8", "LANGUAGE=ru:en", "LC_ALL=ru_RU.UTF-8", "TZ=Europe/Moscow"],
        "image": "selenoid/chrome:91.0",
        "tmpfs": {"/tmp": "size=512m"},
        "hosts": ["x01.aidata.io:127.0.0.1"],
        "port": "4444"
      }
    }
  }
}
Config docker-compose.yaml
version: '3.4'
services:
  selenoid:
    image: aerokube/selenoid:latest-release
    volumes:
      - "${PWD}/init/selenoid:/etc/selenoid"
      - "${PWD}/work/selenoid/video:/opt/selenoid/video"
      - "${PWD}/work/selenoid/logs:/opt/selenoid/logs"
      - "/var/run/docker.sock:/var/run/docker.sock"
    environment:
      - OVERRIDE_VIDEO_OUTPUT_DIR=work/selenoid/video
    command: ["-conf", "etc/selenoid/browsers.json", "-video-output-dir", "/opt/selenoid/video", "-log-output-dir", "/opt/selenoid/logs"]
    ports:
      - "4444:4444"
    network_mode: bridge
in IDEA:
@BeforeEach
public void initDriver() throws IOException {
    final String url = "http://localhost:4444/wd/hub";
    WebDriver driver = new RemoteWebDriver(new URL(url), DesiredCapabilities.chrome());
    driver.manage().window().setSize(new Dimension(1920, 1024));
    WebDriverRunner.setWebDriver(driver);
}

@AfterEach
public void stopDriver() {
    Optional.ofNullable(WebDriverRunner.getWebDriver()).ifPresent(WebDriver::quit);
}
Only version 90 starts (it is the first one in browsers.json), passes successfully and closes, ignoring everything else. What needs to be corrected?
Everything is OK with Docker; we figured it out. You will need to edit the Gradle configs for Selenide.
Closing the topic.
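For reference, a minimal sketch (an assumption, not taken from this thread) of pinning a run to one of the versions declared in browsers.json via the Selenium 3 capabilities API:

// Request a specific Selenoid browser version; "91.0" must match a
// key under "versions" in browsers.json, otherwise Selenoid rejects
// the session.
DesiredCapabilities capabilities = DesiredCapabilities.chrome();
capabilities.setVersion("91.0");
WebDriver driver = new RemoteWebDriver(new URL("http://localhost:4444/wd/hub"), capabilities);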
My goal is to create seeds of users when the database is created.
I'm using IdentityServer4 with Npgsql and docker-compose.
The current behavior creates the database as well as the IdentityServer user manager tables (AspNetUsers, AspNetUserTokens, AspNetUserRoles, etc.), so I know it's migrating that schema to the database. But it skips over the task of running the user seed because it throws a password exception:
Npgsql.NpgsqlException (0x80004005): No password has been provided but the backend requires one (in MD5)
Here's the code in my Program.cs.
public static void Main(string[] args)
{
    var host = CreateHostBuilder(args).Build();
    using (var scope = host.Services.CreateScope())
    {
        var services = scope.ServiceProvider;
        try
        {
            var userManager = services.GetRequiredService<UserManager<User>>();
            var roleManager = services.GetRequiredService<RoleManager<IdentityRole>>();
            var context = services.GetRequiredService<ApplicationDbContext>();
            context.Database.Migrate(); // ERROR HAPPENS HERE
            Task.Run(async () => await UserAndRoleSeeder.SeedUsersAndRoles(roleManager, userManager)).Wait(); // I NEED THIS TO RUN
        }
        catch (Exception ex)
        {
            var logger = services.GetRequiredService<ILogger<Program>>();
            logger.LogError(ex, "Error has occured while migrating to the database.");
        }
    }
    host.Run();
}
Here is the code where it gets the connection string in Startup.cs:
services.AddDbContext<ApplicationDbContext>(options =>
{
    options.UseNpgsql(Configuration.GetConnectionString("DefaultConnection"),
        b =>
        {
            b.MigrationsAssembly("GLFManager.App");
        });
});
If I set a breakpoint here, it shows that the connection string was obtained along with the user id and password, and I verified the password is correct. Otherwise I don't think it would have committed the IdentityServer user manager tables in the first place.
Here is my appsettings.json file where the connection string lives:
{
  "Logging": {
    "LogLevel": {
      "Default": "Information",
      "Microsoft": "Warning",
      "Microsoft.Hosting.Lifetime": "Information"
    }
  },
  "AllowedHosts": "*",
  "ConnectionStrings": {
    "DefaultConnection": "Host=localhost;Port=33010;Database=glfdb;User Id=devdbuser;Password=devdbpassword"
  }
}
I'm thinking it's somewhere in the docker-compose file where some configuration is not registering. This is the docker-compose file:
version: '3.4'
services:
  glfmanager.api:
    image: ${DOCKER_REGISTRY-}glfmanagerapi
    container_name: "glfmanager.api"
    build:
      context: .
      dockerfile: GLFManager.Api/Dockerfile
    ports:
      - "33000:80"
      - "33001:443"
    environment:
      - ConnectionStrings__DefaultConnection=Server=glfmanager.db;Database=glfdb;User Id=devdbuser:password=devdbpassword;
      - Identity_Authority=http://glfmanager.auth
    volumes:
      - .:/usr/src/app
    depends_on:
      - "glfmanager.db"
  glfmanager.auth:
    image: ${DOCKER_REGISTRY-}glfmanagerauth
    container_name: "glfmanager.auth"
    build:
      context: .
      dockerfile: GLFManager.Auth/Dockerfile
    ports:
      - "33005:80"
      - "33006:443"
    environment:
      - ConnectionStrings__DefaultConnection=Server=glfmanager.db;Database=glfdb;User Id=devdbuser:password=devdbpassword;
    volumes:
      - .:/usr/src/app
    depends_on:
      - "glfmanager.db"
  glfmanager.db:
    restart: on-failure
    image: "mdillon/postgis:11"
    container_name: "glfmanager.db"
    environment:
      - POSTGRES_USER=devdbuser
      - POSTGRES_DB=glfdb
      - POSTGRES_PASSWORD=devdbpassword
    volumes:
      - glfmanager-db:/var/lib/postresql/data
    ports:
      - "33010:5432"
volumes:
  glfmanager-db:
I used this code from a class I took on backend development, and it is identical to the project I built there, where it works. So I'm stumped as to why this is giving me that password error.
Found the problem: I used a ':' instead of ';' in my docker-compose file between the User Id and Password parameters.
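For clarity, the corrected environment entry looks like this:

- ConnectionStrings__DefaultConnection=Server=glfmanager.db;Database=glfdb;User Id=devdbuser;Password=devdbpassword;

With the ':' in place, everything from devdbuser up to the next ';' was parsed as the user id value, so no Password parameter was ever seen, which is exactly what the "No password has been provided" error reports.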
I have a dockerized back-end with a golang gin server, postgresql and redis.
Everything starts correctly with this docker-compose.yaml file:
version: '3.9'
services:
  postgresql:
    image: 'postgres:13.1-alpine'
    volumes:
      - data:/var/lib/postgresql/data
    env_file:
      - ./env/postgre.env
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
    ports:
      - '5432:5432'
  server:
    build: ./server
    ports:
      - '8000:8000'
    volumes:
      - ./server:/app
    depends_on:
      - postgresql
  redis:
    image: "redis"
    ports:
      - "6379:6379"
    volumes:
      - $PWD/redis-data:/var/lib/redis
volumes:
  data:
Then I initialize Redis in the main func:
func main() {
    util.InitializeRedis()
    (...)

// InitializeRedis func
func newPool() *redis.Pool {
    return &redis.Pool{
        MaxIdle:     3,
        IdleTimeout: 240 * time.Second,
        DialContext: func(context.Context) (redis.Conn, error) {
            return redis.Dial("tcp", ":6379")
        },
    }
}

var (
    pool *redis.Pool
)

func InitializeRedis() {
    flag.Parse()
    pool = newPool()
}
It doesn't report any error, but I cannot get a connection with pool.Get in another function:
// Handle "/redis" for test
router.GET("/redis", util.ServeHome)
// ServeHome func
func ServeHome(ctx *gin.Context){
conn := pool.Get()
defer conn.Close()
var p1 struct{
Title string `redis:"title" json:"title"`
Author string `redis:"author" json:"author"`
Body string `redis:"body" json:"body"`
}
p1.Title = "Example"
p1.Author = "Gary"
p1.Body = "Hello"
if _, err := conn.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil {
log.Fatalf("Error occured with redis HMSEET, %v", err) // Error in console is from here
return
}
(...)
And when I try to access /redis with Insomnia, it shows Error: Server returned nothing (no headers, no data), and the console logs: Error occured with redis HMSEET, dial tcp :6379: connect: connection refused
I couldn't find any article that solves this problem for me, so I'd appreciate any help.
Since you're using docker-compose, Redis won't be available on :6379 from your server container; instead it will be available on the hostname redis.
I think you'll need to update your code to the following:
redis.Dial("tcp","redis:6379")
I have the following simple Gradle task, which I created based on the information found here:
task prepareDockerNodes(type: net.corda.plugins.Dockerform, dependsOn: ['jar']) {
    nodeDefaults {
        cordapp project(':tcw-contracts-states')
        cordapp project(':tcw-cordapp')
    }
    node {
        name "O=Notary Service,L=Zurich,C=CH"
        notary = [validating: false]
        rpcUsers = rpcUsersList
        useTestClock true
    }
    node {
        name "O=Bank A,L=London,C=GB"
        rpcUsers = rpcUsersList
        useTestClock true
    }
    node {
        name "O=Bank B,L=New York,C=US"
        rpcUsers = rpcUsersList
        useTestClock true
    }
}
but when I run it, I get the following error:
No configuration setting found for key 'p2pAddress'
Then I added p2pAddress "localhost" to all nodes, but after that I started getting this error:
File 'build/nodes/docker-compose.yml' specified for property 'dockerComposePath' does not exist.
Can you please help me get the task working, or give me a working example so I can figure it out myself?
You're pretty close.
First, make sure you've run ./gradlew deployNodes so that the node folder structure and files have been generated.
Next, the Dockerform task in your build.gradle should look something like:
task prepareDockerNodes(type: net.corda.plugins.Dockerform, dependsOn: ['jar']) {
    nodeDefaults {
        cordapp project(":contracts-java")
    }
    node {
        name "O=Notary,L=London,C=GB"
        notary = [validating: false]
        p2pPort 10002
        rpcSettings {
            address("localhost:10003")
            adminAddress("localhost:10023")
        }
        projectCordapp {
            deploy = false
        }
        cordapps.clear()
    }
    node {
        name "O=PartyA,L=London,C=GB"
        p2pPort 10002
        rpcSettings {
            address("localhost:10003")
            adminAddress("localhost:10023")
        }
        rpcUsers = [[user: "user1", "password": "test", "permissions": ["ALL"]]]
    }
    node {
        name "O=PartyB,L=New York,C=US"
        p2pPort 10002
        rpcSettings {
            address("localhost:10003")
            adminAddress("localhost:10023")
        }
        rpcUsers = [[user: "user1", "password": "test", "permissions": ["ALL"]]]
    }
}
Once deployNodes is done, create an empty docker-compose.yml file to work around the error you've gotten: touch workflows-java/build/nodes/docker-compose.yml
Then you can run ./gradlew prepareDockerNodes. Once that's done, edit the generated docker-compose.yml file to change the ports:
version: '3'
services:
  notary:
    build: /Users/chrischabot/Projects/json-cordapp/workflows-java/build/nodes/Notary
    ports:
      - "10002"
      - "10003"
  partya:
    build: /Users/chrischabot/Projects/json-cordapp/workflows-java/build/nodes/PartyA
    ports:
      - "10002"
      - "10003"
  partyb:
    build: /Users/chrischabot/Projects/json-cordapp/workflows-java/build/nodes/PartyB
    ports:
      - "10002"
      - "10003"
And you should be back to a working setup.
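Note that single-value port entries like "10002" publish the container port on a random host port. If you prefer fixed host ports, you can map them explicitly; the host-side numbers below are hypothetical, just an illustration:

  partya:
    ports:
      - "10005:10002"
      - "10006:10003"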