Hyperledger Fabric multi-host setup for the first-network example

I am trying to set up the first-network example in a multi-host environment using Docker Swarm, starting with the following configuration:
HOST1
Orderer
Org1-peer0
Org1-peer1
CLI
HOST2
Org2-peer0
Org2-peer1
I have only changed docker-compose-cli.yaml to make it compatible with Swarm (code given below). I am not able to add the HOST2 / Org2 peers to the channel.
I am executing the below steps in order:
byfn -m generate
docker stack deploy --compose-file docker-compose-cli.yaml overnet
Enter the CLI container and execute ./scripts/script.sh mychannel
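For reference, this is roughly how I get into the CLI container under Swarm (a minimal sketch; the container name is assumed from the stack name overnet and the service name cli):
# find the task container created for the cli service
docker ps --filter name=overnet_cli --format '{{.ID}}'
# open a shell in it and run the channel script
docker exec -it <cli-container-id> bash
./scripts/script.sh mychannel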
I keep getting the below error
2017-08-15 02:42:49.512 UTC [msp] GetDefaultSigningIdentity -> DEBU 006 Obtaining default signing identity
Error: Error getting endorser client channel: PER:404 - Error trying to connect to local peer
/opt/gopath/src/github.com/hyperledger/fabric/peer/common/common.go:116 github.com/hyperledger/fabric/peer/common.GetEndorserClient
/opt/gopath/src/github.com/hyperledger/fabric/peer/channel/channel.go:149 github.com/hyperledger/fabric/peer/channel.InitCmdFactory
/opt/gopath/src/github.com/hyperledger/fabric/peer/channel/join.go:138 github.com/hyperledger/fabric/peer/channel.join
/opt/gopath/src/github.com/hyperledger/fabric/peer/channel/join.go:42 github.com/hyperledger/fabric/peer/channel.joinCmd.func1
/opt/gopath/src/github.com/hyperledger/fabric/vendor/github.com/spf13/cobra/command.go:599 github.com/hyperledger/fabric/vendor/github.com/spf13/cobra.(*Command).execute
/opt/gopath/src/github.com/hyperledger/fabric/vendor/github.com/spf13/cobra/command.go:689 github.com/hyperledger/fabric/vendor/github.com/spf13/cobra.(*Command).ExecuteC
/opt/gopath/src/github.com/hyperledger/fabric/vendor/github.com/spf13/cobra/command.go:648 github.com/hyperledger/fabric/vendor/github.com/spf13/cobra.(*Command).Execute
/opt/gopath/src/github.com/hyperledger/fabric/peer/main.go:118 main.main
/opt/go/src/runtime/proc.go:192 runtime.main
/opt/go/src/runtime/asm_amd64.s:2087 runtime.goexit
Caused by: x509: certificate is valid for peer0.org1.example.com, peer0, not peer0.org2.example.com
docker-compose-cli.yaml
Orderer
version: '3'
networks:
overnet:
services:
orderer_example_com:
image: hyperledger/fabric-orderer
environment:
- ORDERER_GENERAL_LOGLEVEL=debug
- ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
- ORDERER_GENERAL_GENESISMETHOD=file
- ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
- ORDERER_GENERAL_LOCALMSPID=OrdererMSP
- ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
# enabled TLS
- ORDERER_GENERAL_TLS_ENABLED=true
- ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
- ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
- ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
working_dir: /opt/gopath/src/github.com/hyperledger/fabric
command: orderer
volumes:
- ./channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
- ./crypto-config/ordererOrganizations/example.com/orderers/orderer.example.com/msp:/var/hyperledger/orderer/msp
- ./crypto-config/ordererOrganizations/example.com/orderers/orderer.example.com/tls/:/var/hyperledger/orderer/tls
ports:
- 7050:7050
# - 7049:7049
networks:
- overnet
deploy:
replicas: 1
placement:
constraints: [node.role == manager]
Org1 Peers
peer0_org1_example_com:
image: hyperledger/fabric-peer
volumes:
- /var/run/:/host/var/run/
- ./crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/msp:/etc/hyperledger/fabric/msp
- ./crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls:/etc/hyperledger/fabric/tls
ports:
- 7051:7051
- 7053:7053
environment:
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
# the following setting starts chaincode containers on the same
# bridge network as the peers
# https://docs.docker.com/compose/networking/
- CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=overnet
- CORE_LOGGING_LEVEL=DEBUG
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_GOSSIP_USELEADERELECTION=true
- CORE_PEER_GOSSIP_ORGLEADER=false
- CORE_PEER_PROFILE_ENABLED=true
- CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
- CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
- CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
- CORE_PEER_ID=peer0.org1.example.com
- CORE_PEER_ADDRESS=peer0.org1.example.com:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.example.com:7051
- CORE_PEER_LOCALMSPID=Org1MSP
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
command: peer node start
networks:
- overnet
deploy:
replicas: 1
placement:
constraints: [node.role == manager]
Org2 Peers
peer0_org2_example_com:
image: hyperledger/fabric-peer
environment:
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
# the following setting starts chaincode containers on the same
# bridge network as the peers
# https://docs.docker.com/compose/networking/
- CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=overnet
- CORE_LOGGING_LEVEL=DEBUG
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_GOSSIP_USELEADERELECTION=true
- CORE_PEER_GOSSIP_ORGLEADER=false
- CORE_PEER_PROFILE_ENABLED=true
- CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
- CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
- CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
- CORE_PEER_ID=peer0.org2.example.com
- CORE_PEER_ADDRESS=peer0.org2.example.com:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org2.example.com:7051
- CORE_PEER_LOCALMSPID=Org2MSP
volumes:
- /var/run/:/host/var/run/
- ./crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/msp:/etc/hyperledger/fabric/msp
- ./crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls:/etc/hyperledger/fabric/tls
ports:
- 9051:7051
- 9053:7053
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
command: peer node start
networks:
- overnet
deploy:
mode: replicated
replicas: 1
placement:
constraints: [node.role == worker]
CLI
cli:
image: hyperledger/fabric-tools
tty: true
environment:
- GOPATH=/opt/gopath
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
- CORE_LOGGING_LEVEL=DEBUG
- CORE_PEER_ID=cli
- CORE_PEER_ADDRESS=peer0.org2.example.com:7051
- CORE_PEER_LOCALMSPID=Org1MSP
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_TLS_CERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/server.crt
- CORE_PEER_TLS_KEY_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/server.key
- CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org2.example.com/peers/peer0.org4.example.com/tls/ca.crt
- CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
# command: /bin/bash -c './scripts/script.sh ${CHANNEL_NAME}; sleep $TIMEOUT'
volumes:
- /var/run/:/host/var/run/
- ./chaincode/:/opt/gopath/src/github.com/hyperledger/fabric/examples/chaincode/go
- ./crypto-config:/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/
- ./scripts:/opt/gopath/src/github.com/hyperledger/fabric/peer/scripts/
- ./channel-artifacts:/opt/gopath/src/github.com/hyperledger/fabric/peer/channel-artifacts
depends_on:
- orderer_example_com
- peer0_org1_example_com
- peer1_org1_example_com
- peer0_org2_example_com
- peer1_org2_example_com
networks:
- overnet
deploy:
replicas: 1
placement:
constraints: [node.role == manager]
crypto-config.yaml (I did not make any changes to this file, but I am attaching it here for reference)
OrdererOrgs:
# ------------------------------------------------------------------
# Orderer
# ------------------------------------------------------------------
- Name: Orderer
Domain: example.com
# ----------------------------------------------------------------
# "Specs" - See PeerOrgs below for complete description
# ----------------------------------------------------------------
Specs:
- Hostname: orderer
# --------------------------------------------------------------------
# "PeerOrgs" - Definition of organizations managing peer nodes
# --------------------------------------------------------------------
PeerOrgs:
# ------------------------------------------------------------------
# Org1
# ------------------------------------------------------------------
- Name: Org1
Domain: org1.example.com
# ----------------------------------------------------------------
# "Specs"
# ----------------------------------------------------------------
# Uncomment this section to enable the explicit definition of hosts in your
# configuration. Most users will want to use Template, below
#
# Specs is an array of Spec entries. Each Spec entry consists of two fields:
# - Hostname: (Required) The desired hostname, sans the domain.
# - CommonName: (Optional) Specifies the template or explicit override for
# the CN. By default, this is the template:
#
# "{{.Hostname}}.{{.Domain}}"
#
# which obtains its values from the Spec.Hostname and
# Org.Domain, respectively.
# ---------------------------------------------------------------------------
# Specs:
# - Hostname: foo # implicitly "foo.org2.example.com"
# CommonName: foo27.org5.example.com # overrides Hostname-based FQDN set above
# - Hostname: bar
# - Hostname: baz
# ---------------------------------------------------------------------------
# "Template"
# ---------------------------------------------------------------------------
# Allows for the definition of 1 or more hosts that are created sequentially
# from a template. By default, this looks like "peer%d" from 0 to Count-1.
# You may override the number of nodes (Count), the starting index (Start)
# or the template used to construct the name (Hostname).
#
# Note: Template and Specs are not mutually exclusive. You may define both
# sections and the aggregate nodes will be created for you. Take care with
# name collisions
# ---------------------------------------------------------------------------
Template:
Count: 2
# Start: 5
# Hostname: {{.Prefix}}{{.Index}} # default
# ---------------------------------------------------------------------------
# "Users"
# ---------------------------------------------------------------------------
# Count: The number of user accounts _in addition_ to Admin
# ---------------------------------------------------------------------------
Users:
Count: 1
# ------------------------------------------------------------------
# Org2: See "Org1" for full specification
# ------------------------------------------------------------------
- Name: Org2
Domain: org2.example.com
Template:
Count: 2
Users:
Count: 1

I was able to host a Hyperledger Fabric network on multiple machines using Docker Swarm mode. Swarm mode provides a network across multiple hosts/machines for the communication of the Fabric network components.
This post explains the deployment process. It creates a swarm network, and all the other machines join that network: https://medium.com/@wahabjawed/hyperledger-fabric-on-multiple-hosts-a33b08ef24f
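Roughly, the swarm and overlay-network setup from that post looks like this (a minimal sketch; the IP and the join token are placeholders):
# on the manager host
docker swarm init --advertise-addr <HOST1-IP>
# on each additional host, using the token printed by the init command
docker swarm join --token <worker-token> <HOST1-IP>:2377
# create an attachable overlay network that the fabric containers can share
docker network create --driver overlay --attachable my-net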

I have set up a multi-host Fabric network. My orderer and one peer are on one host, and another peer is on a second host. For this we need to make changes in the configtx.yaml file, in the orderer section:
Profiles:
CommonOrgsOrdererGenesis:
Orderer:
<<: *OrdererDefaults
Organizations:
- *OrdererOrg
Consortiums:
SampleConsortiumJA:
Organizations:
- *test
- *mch
- *test2
- *test3
CommonOrgChannel:
Consortium: SampleConsortiumJA
Application:
<<: *ApplicationDefaults
Organizations:
- *test
- *mch
- *test2
- *test3
MJAOrgsOrdererGenesis:
Orderer:
<<: *OrdererDefaults
Organizations:
- *OrdererOrg
Consortiums:
SampleConsortiumJA:
Organizations:
- *test
- *mch
- *test2
MJAOrgChannel:
Consortium: SampleConsortiumJA
Application:
<<: *ApplicationDefaults
Organizations:
- *test
- *mch
- *test2
MABOrgsOrdererGenesis:
Orderer:
<<: *OrdererDefaults
Organizations:
- *OrdererOrg
Consortiums:
SampleConsortiumAB:
Organizations:
- *test2
- *mch
- *test3
MABOrgChannel:
Consortium: SampleConsortiumAB
Application:
<<: *ApplicationDefaults
Organizations:
- *test
- *mch
- *test3
MBJOrgsOrdererGenesis:
Orderer:
<<: *OrdererDefaults
Organizations:
- *OrdererOrg
Consortiums:
SampleConsortiumBJ:
Organizations:
- *test3
- *mch
- *test
MBJOrgChannel:
Consortium: SampleConsortiumBJ
Application:
<<: *ApplicationDefaults
Organizations:
- *test3
- *mch
- *test
Organizations:
- &OrdererOrg
Name: OrdererOrg
# ID to load the MSP definition as
ID: OrdererMSP
MSPDir: crypto-config/ordererOrganizations/mch.test/msp
- &test
Name: test
# ID to load the MSP definition as
ID: testMSP
MSPDir: crypto-config/peerOrganizations/test.test/msp
AnchorPeers:
- Host: peer0.test.test
Port: 7054
- &test2
# DefaultOrg defines the organization which is used in the sampleconfig
# of the fabric.git development environment
Name: test2
# ID to load the MSP definition as
ID: test2MSP
MSPDir: crypto-config/peerOrganizations/test2.test/msp
AnchorPeers:
- Host: peer0.test2.test
Port: 7055
- &test3
# DefaultOrg defines the organization which is used in the sampleconfig
# of the fabric.git development environment
Name: test3
# ID to load the MSP definition as
ID: test3MSP
MSPDir: crypto-config/peerOrganizations/test3.test/msp
AnchorPeers:
- Host: peer0.test3.test
Port: 7059
- &mch
Name: mch
# ID to load the MSP definition as
ID: mchMSP
MSPDir: crypto-config/peerOrganizations/mch.test/msp
AnchorPeers:
- Host: peer0.mch.test
Port: 7051
Orderer: &OrdererDefaults
OrdererType: solo
Addresses:
- 10.64.253.213:7050
# Batch Timeout: The amount of time to wait before creating a batch
BatchTimeout: 2s
# Batch Size: Controls the number of messages batched into a block
BatchSize:
MaxMessageCount: 10
AbsoluteMaxBytes: 99 MB
PreferredMaxBytes: 512 KB
Kafka:
Brokers:
- 127.0.0.1:9092
Organizations:
Application: &ApplicationDefaults
Organizations:
===============================================================
After this, bring up the orderer and peer1 on one server and peer2 on the other server. Create the channel using the IP of the orderer instead of its name, then copy the channel block file to the other peer as well and join both peers one at a time. Install the chaincode on both peers. You are good to go.
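A minimal sketch of those channel steps from inside the CLI container (Fabric 1.x syntax; the channel name, chaincode name/path and TLS CA path are placeholders, and the orderer IP is the one from the configtx above):
peer channel create -o 10.64.253.213:7050 -c mychannel -f ./channel-artifacts/channel.tx --tls --cafile /path/to/orderer-tlsca.crt
# copy mychannel.block to the second server, then on each peer:
peer channel join -b mychannel.block
# install the chaincode on both peers
peer chaincode install -n mycc -v 1.0 -p github.com/chaincode/mycc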

You have to use Docker Swarm to implement a multi-host Hyperledger Fabric blockchain network.
Read the steps at the following URL:
https://github.com/chudsonsolomon/Block-chain-Swarm-Multi-Host

First of all, I don't think you have to
Enter the CLI container and execute ./scripts/script.sh mychannel
Or have you commented out the command in the docker-compose file as described in the "Start the network" step?
On the other hand, I have managed to set up a multi-host environment using Docker. However, instead of defining an overlay network, I set network_mode: host for all the containers that I start up.
Could you show the logs that appear in the peer and in the orderer?
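For illustration, the network_mode: host variant I mention would look roughly like this for one peer service (a trimmed sketch, not my full compose file; the orderer IP is a placeholder):
peer0.org1.example.com:
  image: hyperledger/fabric-peer
  network_mode: host
  environment:
    - CORE_PEER_ADDRESS=peer0.org1.example.com:7051
  extra_hosts:
    - "orderer.example.com:<HOST1-IP>"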

I guess the problem comes from the service names in the docker-compose file:
orderer_example_com => orderer.example.com
peer0_org1_example_com => peer0.org1.example.com
...
Use dots (.), not underscores (_), for naming.
Read Wikipedia for more.
You also need Docker Swarm for a multi-host setup.
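For example, either rename the service keys as suggested, or keep the underscore keys and give each container a network alias that matches the hostname in its TLS certificate (a minimal sketch for one peer):
peer0_org1_example_com:
  image: hyperledger/fabric-peer
  networks:
    overnet:
      aliases:
        - peer0.org1.example.com   # must match the CN/SAN in the peer's TLS certificate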

Related

Promtail: Loki Server returned HTTP status 429 Too Many Requests

I'm running Loki for test purposes in Docker and have recently been getting the following error from the Promtail and Loki containers:
level=warn ts=2022-02-18T09:41:39.186511145Z caller=client.go:349 component=client host=loki:3100 msg="error sending batch, will retry" status=429 error="server returned HTTP status 429 Too Many Requests (429): Maximum active stream limit exceeded, reduce the number of active streams (reduce labels or reduce label values), or contact your Loki administrator to see if the limit can be increased"
I have tried increasing limit settings (ingestion_rate_mb and ingestion_burst_size_mb) in my Loki config.
I set up two Promtail jobs: one job ingests MS Exchange logs from a local directory (currently 8 TB and increasing), the other job gets logs spooled from syslog-ng.
I've read that reducing labels helps, but I'm only using two labels.
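Since the 429 message points at the active stream limit, I also looked at the stream-related options in limits_config (option names as in the Loki 2.4 docs; the values below are only examples, not what I am running):
limits_config:
  max_streams_per_user: 0             # 0 = rely on the global limit only
  max_global_streams_per_user: 10000  # raise the global active-stream limit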
Configuration
Below my config files (docker-compose, loki, promtail):
docker-compose.yaml
version: "3"
networks:
loki:
services:
loki:
image: grafana/loki:2.4.2
container_name: loki
restart: always
user: "10001:10001"
ports:
- "3100:3100"
command: -config.file=/etc/loki/local-config.yaml
volumes:
- ${DATADIR}/loki/etc:/etc/loki:rw
- ${DATADIR}/loki/chunks:/loki/chunks
networks:
- loki
promtail:
image: grafana/promtail:2.4.2
container_name: promtail
restart: always
volumes:
- /var/log/loki:/var/log/loki
- ${DATADIR}/promtail/etc:/etc/promtail
ports:
- "1514:1514" # for syslog-ng
- "9080:9080" # for http web interface
command: -config.file=/etc/promtail/config.yml
networks:
- loki
grafana:
image: grafana/grafana:8.3.4
container_name: grafana
restart: always
user: "476:0"
volumes:
- ${DATADIR}/grafana/var:/var/lib/grafana
ports:
- "3000:3000"
networks:
- loki
Loki Config
auth_enabled: false
server:
http_listen_port: 3100
common:
path_prefix: /loki
storage:
filesystem:
chunks_directory: /loki/chunks
rules_directory: /loki/rules
replication_factor: 1
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
ruler:
alertmanager_url: http://localhost:9093
# https://grafana.com/docs/loki/latest/configuration/#limits_config
limits_config:
reject_old_samples: true
reject_old_samples_max_age: 168h
ingestion_rate_mb: 12
ingestion_burst_size_mb: 24
per_stream_rate_limit: 24MB
chunk_store_config:
max_look_back_period: 336h
table_manager:
retention_deletes_enabled: true
retention_period: 2190h
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_encoding: snappy
Promtail Config
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://loki:3100/loki/api/v1/push
scrape_configs:
- job_name: exchange
static_configs:
- targets:
- localhost
labels:
job: exchange
__path__: /var/log/loki/exchange/*/*/*log
- job_name: syslog-ng
syslog:
listen_address: 0.0.0.0:1514
idle_timeout: 60s
label_structured_data: yes
labels:
job: "syslog-ng"
relabel_configs:
- source_labels: ['__syslog_message_hostname']
target_label: 'host'

How to connect Winlogbeat to a dockerized Elasticsearch cluster using SSL?

For the past week I have been trying to connect Winlogbeat (which is on my host machine) to an Elasticsearch cluster that I set up on an Ubuntu VM using Docker,
following this tutorial. (The tutorial doesn't explain how to connect a Beat.)
My problem is with the SSL configuration of Winlogbeat; I just can't get it right for some reason.
This is the error I get on the Windows machine after running the setup command (.\winlogbeat.exe setup -e):
2021-02-22T01:42:13.286+0200 ERROR instance/beat.go:971 Exiting: couldn't connect to any of
the configured Elasticsearch hosts. Errors: [error connecting to Elasticsearch at
https://192.168.216.129:9200: Get "https://192.168.216.129:9200": x509: certificate signed by unknown
authority]
Exiting: couldn't connect to any of the configured Elasticsearch hosts. Errors: [error connecting to
Elasticsearch at https://192.168.216.129:9200: Get "https://192.168.216.129:9200": x509: certificate
signed by unknown authority]
And on the Elasticsearch node I get this error -
es01 | "at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) [netty-
common-4.1.49.Final.jar:4.1.49.Final]",
es01 | "at java.lang.Thread.run(Thread.java:832) [?:?]",
es01 | "Caused by: javax.net.ssl.SSLHandshakeException: Received fatal alert: bad_certificate",
I tried different methods without any success:
Using openssl to generate a self-signed certificate for Winlogbeat (using this tutorial).
After that I tried to add the new CA I generated to the es_certs volume, and I tried to modify the
elastic-docker-tls.yml so it will accept the new CA (I failed at that).
I changed the instances.yml file by adding a winlogbeat section:
- name: winlogbeat
dns:
- <My Computer Name>
ip:
- 192.168.1.136
and ran docker-compose -f create-certs.yml run --rm create_certs on a fresh install of the stack, which
resulted in the creation of a winlogbeat.crt and winlogbeat.key, but still it didn't work.
I also tried playing with verification_mode, changing it to "none", but it didn't work either.
I don't know what else to try, and I have failed to find a good source that details the SSL configuration from Beats to the ELK stack in a Docker environment.
This is the elastic-docker-tls.yml file:
version: '2.2'
services:
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION}
container_name: es01
environment:
- node.name=es01
- cluster.name=es-docker-cluster
- discovery.seed_hosts=es02,es03
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- xpack.license.self_generated.type=trial
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es01/es01.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es01/es01.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es01/es01.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es01/es01.key
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- data01:/usr/share/elasticsearch/data
- certs:$CERTS_DIR
ports:
- 9200:9200
networks:
- elastic
healthcheck:
test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi
interval: 30s
timeout: 10s
retries: 5
es02:
image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION}
container_name: es02
environment:
- node.name=es02
- cluster.name=es-docker-cluster
- discovery.seed_hosts=es01,es03
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- xpack.license.self_generated.type=trial
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es02/es02.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es02/es02.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es02/es02.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es02/es02.key
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- data02:/usr/share/elasticsearch/data
- certs:$CERTS_DIR
networks:
- elastic
es03:
image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION}
container_name: es03
environment:
- node.name=es03
- cluster.name=es-docker-cluster
- discovery.seed_hosts=es01,es02
- cluster.initial_master_nodes=es01,es02,es03
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- xpack.license.self_generated.type=trial
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=$CERTS_DIR/es03/es03.key
- xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.http.ssl.certificate=$CERTS_DIR/es03/es03.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
- xpack.security.transport.ssl.certificate=$CERTS_DIR/es03/es03.crt
- xpack.security.transport.ssl.key=$CERTS_DIR/es03/es03.key
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- data03:/usr/share/elasticsearch/data
- certs:$CERTS_DIR
networks:
- elastic
kib01:
image: docker.elastic.co/kibana/kibana:${VERSION}
container_name: kib01
depends_on: {"es01": {"condition": "service_healthy"}}
ports:
- 5601:5601
environment:
SERVERNAME: localhost
ELASTICSEARCH_URL: https://es01:9200
ELASTICSEARCH_HOSTS: https://es01:9200
ELASTICSEARCH_USERNAME: kibana_system
ELASTICSEARCH_PASSWORD: CHANGEME
ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES: $CERTS_DIR/ca/ca.crt
SERVER_SSL_ENABLED: "true"
SERVER_SSL_KEY: $CERTS_DIR/kib01/kib01.key
SERVER_SSL_CERTIFICATE: $CERTS_DIR/kib01/kib01.crt
volumes:
- certs:$CERTS_DIR
networks:
- elastic
volumes:
data01:
driver: local
data02:
driver: local
data03:
driver: local
certs:
driver: local
networks:
elastic:
driver: bridge
This is the Winlogbeat configuration
###################### Winlogbeat Configuration Example ########################
winlogbeat.event_logs:
- name: Application
ignore_older: 72h
- name: System
- name: Security
processors:
- script:
lang: javascript
id: security
file: ${path.home}/module/security/config/winlogbeat-security.js
- name: Microsoft-Windows-Sysmon/Operational
processors:
- script:
lang: javascript
id: sysmon
file: ${path.home}/module/sysmon/config/winlogbeat-sysmon.js
- name: Windows PowerShell
event_id: 400, 403, 600, 800
processors:
- script:
lang: javascript
id: powershell
file: ${path.home}/module/powershell/config/winlogbeat-powershell.js
- name: Microsoft-Windows-PowerShell/Operational
event_id: 4103, 4104, 4105, 4106
processors:
- script:
lang: javascript
id: powershell
file: ${path.home}/module/powershell/config/winlogbeat-powershell.js
- name: ForwardedEvents
tags: [forwarded]
processors:
- script:
when.equals.winlog.channel: Security
lang: javascript
id: security
file: ${path.home}/module/security/config/winlogbeat-security.js
- script:
when.equals.winlog.channel: Microsoft-Windows-Sysmon/Operational
lang: javascript
id: sysmon
file: ${path.home}/module/sysmon/config/winlogbeat-sysmon.js
- script:
when.equals.winlog.channel: Windows PowerShell
lang: javascript
id: powershell
file: ${path.home}/module/powershell/config/winlogbeat-powershell.js
- script:
when.equals.winlog.channel: Microsoft-Windows-PowerShell/Operational
lang: javascript
id: powershell
file: ${path.home}/module/powershell/config/winlogbeat-powershell.js
# ====================== Elasticsearch template settings =======================
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
# env: staging
# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify and additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
host: "https://192.168.216.129:5601"
setup.kibana.ssl.enabled: true
setup.kibana.ssl.certificate_authorities: ["C:\\Program Files\\Winlogbeat\\ca.crt"]
setup.kibana.ssl.certificate: "C:\\Program Files\\Winlogbeat\\winlogbeat.crt"
setup.kibana.ssl.key: "C:\\Program Files\\Winlogbeat\\winlogbeat.key"
# verification_mode: none
username: "elastic"
password: "XXXXXXXXXXXXXXXXX"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# =============================== Elastic Cloud ================================
# These settings simplify using Winlogbeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["https://192.168.216.129:9200"]
# Protocol - either `http` (default) or `https`.
protocol: "https"
output.elasticsearch.ssl.certificate_authorities: ["C:\\Program Files\\Winlogbeat\\ca.crt"]
output.elasticsearch.ssl.certificate: "C:\\Program Files\\Winlogbeat\\winlogbeat.crt"
output.elasticsearch.ssl.key: "C:\\Program Files\\Winlogbeat\\winlogbeat.key"
# output.elasticsearch.sslverification_mode: none
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
username: "elastic"
password: "XXXXXXXXXXXXXXX"
# ------------------------------ Logstash Output -------------------------------
#output.logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# ================================= Processors =================================
processors:
- add_host_metadata:
when.not.contains.tags: forwarded
- add_cloud_metadata: ~
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]
# ============================= X-Pack Monitoring ==============================
# Winlogbeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Winlogbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# ============================== Instrumentation ===============================
# Instrumentation support for the winlogbeat.
#instrumentation:
# Set to true to enable instrumentation of winlogbeat.
#enabled: false
# Environment in which winlogbeat is running on (eg: staging, production, etc.)
#environment: ""
# APM Server hosts to report instrumentation results to.
#hosts:
# - http://localhost:8200
# API Key for the APM Server(s).
# If api_key is set then secret_token will be ignored.
#api_key:
# Secret token for the APM Server(s).
#secret_token:
# ================================= Migration ==================================
# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true
So it took me some time, but I've figured out what the problem with my certificate was:
I didn't add it to the trusted root store on my Windows machine.
In the end I created a winlogbeat.crt and winlogbeat.key using the elasticsearch-certutil tool, by adding a winlogbeat instance to the instances.yml file, and copied the winlogbeat.crt, winlogbeat.key and ca.crt to my Windows machine.
Note: you can find all of them under /var/lib/docker/volumes/es_certs/_data/
On the Windows machine I configured Winlogbeat the normal way, and in the end I added the ca.crt to the trusted root store using this tutorial.
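For anyone scripting this, the trusted-root import can also be done from an elevated PowerShell prompt instead of the certificate UI (a sketch; the path assumes ca.crt sits in the Winlogbeat folder):
Import-Certificate -FilePath "C:\Program Files\Winlogbeat\ca.crt" -CertStoreLocation Cert:\LocalMachine\Root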

WSO2 IS error after changing the keystore - System error while Authenticating/Authorizing User : Error when handling event : PRE_AUTHENTICATION

I am running WSO2 IS version 5.8.0 in Docker Swarm; I scripted a compose file for this, mapping the files
deployment.toml and wso2carbon.jks, and a directory on the servers.
After changing the keystore I receive this error on admin login:
System error while Authenticating/Authorizing User : Error when handling event : PRE_AUTHENTICATION
Removing the mapping, the SSL certificate is not valid, but I can log in.
PS: I use Traefik to redirect to the container.
The stack deploy file:
#IS#
is-hml:
image: wso2/wso2is:5.8.0
ports:
- 4763:4763
- 4443:9443
volumes:
#- /docker/release-hml/wso2/full-identity-server-volume:/home/wso2carbon/wso2is-5.8.0
- /docker/release-hml/wso2/identity-server:/home/wso2carbon/wso2-config-volume
extra_hosts:
- "wso2-hml.valecard.com.br:127.0.0.1"
networks:
traefik_traefik:
aliases:
- is-hml
configs:
#- source: deployment.toml
# target: /home/wso2carbon/wso2is-5.8.0/repository/conf/deployment.toml
#
- source: wso2carbon.jks
target: /home/wso2carbon/wso2is-5.8.0/repository/resources/security/wso2carbon.jks
#- source: catalina-server.xml
# target: /home/wso2carbon/wso2is-5.8.0/repository/conf/tomcat/catalina-server.xml
- source: carbon.xml
target: /home/wso2carbon/wso2is-5.8.0/repository/conf/carbon.xml
#environment:
# - "CATALINA_OPTS=-Xmx2g -Xms2g -XX:MaxPermSize=1024m"
# - "JVM_OPTS=-Xmx2g -Xms2g -XX:MaxPermSize=1024m"
# - "JAVA_OPTS=-Xmx2g -Xms2g"
deploy:
#endpoint_mode: dnsrr
resources:
limits:
cpus: '2'
memory: '4096M'
replicas: 1
labels:
- "traefik.docker.network=traefik_traefik"
- "traefik.backend=is-hml"
- "traefik.port=4443"
- "traefik.frontend.entryPoints=http,https"
- "traefik.frontend.rule=Host:wso2-hml.valecard.com.br"
configs:
deployment.toml:
file: ./wso2-config/deployment.toml
catalina-server.xml:
file: ./wso2-config/catalina-server.xml
wso2carbon.jks:
file: ../../certs/wso2carbon-valecard.jks
carbon.xml:
file: ./wso2-config/carbon.xml
networks:
traefik_traefik:
external: true
The keystore password is the one from the deployment.toml.
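For reference, these are the deployment.toml keystore entries I mean (a sketch based on the WSO2 deployment.toml documentation; key names may differ per version, and the value is a placeholder, not my real password):
[keystore.primary]
file_name = "wso2carbon.jks"
password = "<keystore-password>"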
Thanks.

Do the port numbers need to be changed when changing from LevelDB to CouchDB in Hyperledger Fabric?

I'm working on the front end of a blockchain app using Hyperledger Fabric 1.1 and the ChainHero SDK. After changing the Hyperledger version from 1.4 to 1.1 (because the SDK does not support 1.4) and changing the default database from LevelDB to CouchDB, it will not initialize. Do you need to change ports when converting from LevelDB to CouchDB?
I have been working on a chaincode in Hyperledger Fabric (HL). I finished developing my back end with the chaincode and am now working on the front end, where I use the ChainHero SDK. Since this SDK only supports HL 1.1, I needed to change the HL version. After this I forgot to update the version references everywhere, and now the network will not boot and produces the following error output:
Unable to initialize the Fabric SDK: failed to make admin join channel: join channel failed: SendProposal failed: Transaction processing for endorser [localhost:7051]: Endorser Client Status Code: (2) CONNECTION_FAILED. Description: dialing connection timed out [localhost:7051]
When changing from LevelDB to CouchDB I followed this guide, which doesn't say anything about changing ports.
I expected it to initialize, but there seems to be a problem, and I'm not sure whether it would help to increase the timeout or whether it's a problem with the ports.
config.yaml
name: "heroes-service-network"
#
# Schema version of the content. Used by the SDK to apply the corresponding parsing rules.
#
version: 1.0.0
#
# The client section used by GO SDK.
#
client:
# Which organization does this application instance belong to? The value must be the name of an org
# defined under "organizations"
organization: org1
logging:
level: info
# Global configuration for peer, event service and orderer timeouts
# if this this section is omitted, then default values will be used (same values as below)
# peer:
# timeout:
# connection: 10s
# response: 180s
# discovery:
# # Expiry period for discovery service greylist filter
# # The channel client will greylist peers that are found to be offline
# # to prevent re-selecting them in subsequent retries.
# # This interval will define how long a peer is greylisted
# greylistExpiry: 10s
# eventService:
# # Event service type (optional). If not specified then the type is automatically
# # determined from channel capabilities.
# type: (deliver|eventhub)
# the below timeouts are commented out to use the default values that are found in
# "pkg/fab/endpointconfig.go"
# the client is free to override the default values by uncommenting and resetting
# the values as they see fit in their config file
# timeout:
# connection: 15s
# registrationResponse: 15s
# orderer:
# timeout:
# connection: 15s
# response: 15s
# global:
# timeout:
# query: 180s
# execute: 180s
# resmgmt: 180s
# cache:
# connectionIdle: 30s
# eventServiceIdle: 2m
# channelConfig: 30m
# channelMembership: 30s
# discovery: 10s
# selection: 10m
# Root of the MSP directories with keys and certs.
cryptoconfig:
path: ${GOPATH}/src/github.com/chainHero/heroes-service/fixtures/crypto-config
# Some SDKs support pluggable KV stores, the properties under "credentialStore"
# are implementation specific
credentialStore:
path: /tmp/heroes-service-store
# [Optional]. Specific to the CryptoSuite implementation used by GO SDK. Software-based implementations
# requiring a key store. PKCS#11 based implementations does not.
cryptoStore:
path: /tmp/heroes-service-msp
# BCCSP config for the client. Used by GO SDK.
BCCSP:
security:
enabled: true
default:
provider: "SW"
hashAlgorithm: "SHA2"
softVerify: true
level: 256
tlsCerts:
# [Optional]. Use system certificate pool when connecting to peers, orderers (for negotiating TLS) Default: false
systemCertPool: false
# [Optional]. Client key and cert for TLS handshake with peers and orderers
client:
keyfile:
certfile:
#
# [Optional]. But most apps would have this section so that channel objects can be constructed
# based on the content below. If an app is creating channels, then it likely will not need this
# section.
#
channels:
# name of the channel
chainhero:
# Required. list of orderers designated by the application to use for transactions on this
# channel. This list can be a result of access control ("org1" can only access "ordererA"), or
# operational decisions to share loads from applications among the orderers. The values must
# be "names" of orgs defined under "organizations/peers"
# deprecated: not recommended, to override any orderer configuration items, entity matchers should be used.
# orderers:
# - orderer.example.com
# Required. list of peers from participating orgs
peers:
peer0.org1.hf.chainhero.io:
# [Optional]. will this peer be sent transaction proposals for endorsement? The peer must
# have the chaincode installed. The app can also use this property to decide which peers
# to send the chaincode install request. Default: true
endorsingPeer: true
# [Optional]. will this peer be sent query proposals? The peer must have the chaincode
# installed. The app can also use this property to decide which peers to send the
# chaincode install request. Default: true
chaincodeQuery: true
# [Optional]. will this peer be sent query proposals that do not require chaincodes, like
# queryBlock(), queryTransaction(), etc. Default: true
ledgerQuery: true
# [Optional]. will this peer be the target of the SDK's listener registration? All peers can
# produce events but the app typically only needs to connect to one to listen to events.
# Default: true
eventSource: true
peer1.org1.hf.chainhero.io:
policies:
#[Optional] options for retrieving channel configuration blocks
queryChannelConfig:
#[Optional] min number of success responses (from targets/peers)
minResponses: 1
#[Optional] channel config will be retrieved for these number of random targets
maxTargets: 1
#[Optional] retry options for query config block
retryOpts:
#[Optional] number of retry attempts
attempts: 5
#[Optional] the back off interval for the first retry attempt
initialBackoff: 500ms
#[Optional] the maximum back off interval for any retry attempt
maxBackoff: 5s
#[Optional] he factor by which the initial back off period is exponentially incremented
backoffFactor: 2.0
#
# list of participating organizations in this network
#
organizations:
org1:
mspid: org1.hf.chainhero.io
cryptoPath: peerOrganizations/org1.hf.chainhero.io/users/{userName}@org1.hf.chainhero.io/msp
peers:
- peer0.org1.hf.chainhero.io
- peer1.org1.hf.chainhero.io
# [Optional]. Certificate Authorities issue certificates for identification purposes in a Fabric based
# network. Typically certificates provisioning is done in a separate process outside of the
# runtime network. Fabric-CA is a special certificate authority that provides a REST APIs for
# dynamic certificate management (enroll, revoke, re-enroll). The following section is only for
# Fabric-CA servers.
certificateAuthorities:
- ca.org1.hf.chainhero.io
#
# List of orderers to send transaction and channel create/update requests to. For the time
# being only one orderer is needed. If more than one is defined, which one get used by the
# SDK is implementation specific. Consult each SDK's documentation for its handling of orderers.
#
orderers:
orderer.hf.chainhero.io:
url: localhost:7050
# these are standard properties defined by the gRPC library
# they will be passed in as-is to gRPC client constructor
grpcOptions:
ssl-target-name-override: orderer.hf.chainhero.io
# These parameters should be set in coordination with the keepalive policy on the server,
# as incompatible settings can result in closing of connection.
# When duration of the 'keep-alive-time' is set to 0 or less the keep alive client parameters are disabled
keep-alive-time: 0s
keep-alive-timeout: 20s
keep-alive-permit: false
fail-fast: false
# allow-insecure will be taken into consideration if address has no protocol defined, if true then grpc or else grpcs
allow-insecure: false
tlsCACerts:
# Certificate location absolute path
path: ${GOPATH}/src/github.com/chainHero/heroes-service/fixtures/crypto-config/ordererOrganizations/hf.chainhero.io/tlsca/tlsca.hf.chainhero.io-cert.pem
#
# List of peers to send various requests to, including endorsement, query
# and event listener registration.
#
peers:
peer0.org1.hf.chainhero.io:
# this URL is used to send endorsement and query requests
url: localhost:7051
# eventUrl is only needed when using eventhub (default is delivery service)
eventUrl: localhost:7053
grpcOptions:
ssl-target-name-override: peer0.org1.hf.chainhero.io
# These parameters should be set in coordination with the keepalive policy on the server,
# as incompatible settings can result in closing of connection.
# When duration of the 'keep-alive-time' is set to 0 or less the keep alive client parameters are disabled
keep-alive-time: 0s
keep-alive-timeout: 20s
keep-alive-permit: false
fail-fast: false
# allow-insecure will be taken into consideration if address has no protocol defined, if true then grpc or else grpcs
allow-insecure: false
tlsCACerts:
# Certificate location absolute path
path: ${GOPATH}/src/github.com/chainHero/heroes-service/fixtures/crypto-config/peerOrganizations/org1.hf.chainhero.io/tlsca/tlsca.org1.hf.chainhero.io-cert.pem
peer1.org1.hf.chainhero.io:
# this URL is used to send endorsement and query requests
url: localhost:8051
# eventUrl is only needed when using eventhub (default is delivery service)
eventUrl: localhost:8053
grpcOptions:
ssl-target-name-override: peer1.org1.hf.chainhero.io
# These parameters should be set in coordination with the keepalive policy on the server,
# as incompatible settings can result in closing of connection.
# When duration of the 'keep-alive-time' is set to 0 or less the keep alive client parameters are disabled
keep-alive-time: 0s
keep-alive-timeout: 20s
keep-alive-permit: false
fail-fast: false
# allow-insecure will be taken into consideration if address has no protocol defined, if true then grpc or else grpcs
allow-insecure: false
tlsCACerts:
# Certificate location absolute path
path: ${GOPATH}/src/github.com/chainHero/heroes-service/fixtures/crypto-config/peerOrganizations/org1.hf.chainhero.io/tlsca/tlsca.org1.hf.chainhero.io-cert.pem
#
# Fabric-CA is a special kind of Certificate Authority provided by Hyperledger Fabric which allows
# certificate management to be done via REST APIs. Application may choose to use a standard
# Certificate Authority instead of Fabric-CA, in which case this section would not be specified.
#
certificateAuthorities:
ca.org1.hf.chainhero.io:
url: http://localhost:7054
# Fabric-CA supports dynamic user enrollment via REST APIs. A "root" user, a.k.a registrar, is
# needed to enroll and invoke new users.
httpOptions:
verify: false
registrar:
enrollId: admin
enrollSecret: adminpw
# [Optional] The optional name of the CA.
caName: ca.org1.hf.chainhero.io
tlsCACerts:
# Certificate location absolute path
path: ${GOPATH}/src/github.com/chainHero/heroes-service/fixtures/crypto-config/peerOrganizations/org1.hf.chainhero.io/ca/ca.org1.hf.chainhero.io-cert.pem
entityMatchers:
peer:
- pattern: (\w*)peer0.org1.hf.chainhero.io(\w*)
urlSubstitutionExp: localhost:7051
eventUrlSubstitutionExp: localhost:7053
sslTargetOverrideUrlSubstitutionExp: peer0.org1.hf.chainhero.io
mappedHost: peer0.org1.hf.chainhero.io
- pattern: (\w*)peer1.org1.hf.chainhero.io(\w*)
urlSubstitutionExp: localhost:8051
eventUrlSubstitutionExp: localhost:8053
sslTargetOverrideUrlSubstitutionExp: peer1.org1.hf.chainhero.io
mappedHost: peer1.org1.hf.chainhero.io
orderer:
- pattern: (\w*)orderer.hf.chainhero.io(\w*)
urlSubstitutionExp: localhost:7050
sslTargetOverrideUrlSubstitutionExp: orderer.hf.chainhero.io
mappedHost: orderer.hf.chainhero.io
certificateAuthorities:
- pattern: (\w*)ca.org1.hf.chainhero.io(\w*)
urlSubstitutionExp: http://localhost:7054
mappedHost: ca.org1.hf.chainhero.io
docker-compose.yaml
version: '2'
networks:
default:
services:
orderer.hf.chainhero.io:
image: hyperledger/fabric-orderer:x86_64-1.1.0
container_name: orderer.hf.chainhero.io
environment:
- ORDERER_GENERAL_LOGLEVEL=debug
- ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
- ORDERER_GENERAL_LISTENPORT=7050
- ORDERER_GENERAL_GENESISPROFILE=ChainHero
- ORDERER_GENERAL_GENESISMETHOD=file
- ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
- ORDERER_GENERAL_LOCALMSPID=hf.chainhero.io
- ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
- ORDERER_GENERAL_TLS_ENABLED=true
- ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
- ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
- ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
working_dir: /opt/gopath/src/github.com/hyperledger/fabric-1.1.0
command: orderer
volumes:
- ./artifacts/orderer.genesis.block:/var/hyperledger/orderer/orderer.genesis.block
- ./crypto-config/ordererOrganizations/hf.chainhero.io/orderers/orderer.hf.chainhero.io/msp:/var/hyperledger/orderer/msp
- ./crypto-config/ordererOrganizations/hf.chainhero.io/orderers/orderer.hf.chainhero.io/tls:/var/hyperledger/orderer/tls
ports:
- 7050:7050
networks:
default:
aliases:
- orderer.hf.chainhero.io
ca.org1.hf.chainhero.io:
image: hyperledger/fabric-ca:x86_64-1.1.0
container_name: ca.org1.hf.chainhero.io
environment:
- FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
- FABRIC_CA_SERVER_CA_NAME=ca.org1.hf.chainhero.io
- FABRIC_CA_SERVER_CA_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org1.hf.chainhero.io-cert.pem
- FABRIC_CA_SERVER_CA_KEYFILE=/etc/hyperledger/fabric-ca-server-config/5da5c8049a9ec349ec4447c82a9c0f7db75b84012a78954ef083a9c9e4a9fa25_sk
- FABRIC_CA_SERVER_TLS_ENABLED=true
- FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org1.hf.chainhero.io-cert.pem
- FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/5da5c8049a9ec349ec4447c82a9c0f7db75b84012a78954ef083a9c9e4a9fa25_sk
ports:
- 7054:7054
command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
volumes:
- ./crypto-config/peerOrganizations/org1.hf.chainhero.io/ca/:/etc/hyperledger/fabric-ca-server-config
networks:
default:
aliases:
- ca.org1.hf.chainhero.io
peer0.org1.hf.chainhero.io:
image: hyperledger/fabric-peer:x86_64-1.1.0
container_name: peer0.org1.hf.chainhero.io
environment:
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
- CORE_VM_DOCKER_ATTACHSTDOUT=true
- CORE_LOGGING_LEVEL=DEBUG
- CORE_PEER_NETWORKID=chainhero
- CORE_PEER_PROFILE_ENABLED=true
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_TLS_CERT_FILE=/var/hyperledger/tls/server.crt
- CORE_PEER_TLS_KEY_FILE=/var/hyperledger/tls/server.key
- CORE_PEER_TLS_ROOTCERT_FILE=/var/hyperledger/tls/ca.crt
- CORE_PEER_ID=peer0.org1.hf.chainhero.io
- CORE_PEER_ADDRESSAUTODETECT=true
- CORE_PEER_ADDRESS=peer0.org1.hf.chainhero.io:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.hf.chainhero.io:7051
- CORE_PEER_GOSSIP_USELEADERELECTION=true
- CORE_PEER_GOSSIP_ORGLEADER=false
- CORE_PEER_GOSSIP_SKIPHANDSHAKE=true
- CORE_PEER_LOCALMSPID=org1.hf.chainhero.io
- CORE_PEER_MSPCONFIGPATH=/var/hyperledger/msp
- CORE_PEER_TLS_SERVERHOSTOVERRIDE=peer0.org1.hf.chainhero.io
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
command: peer node start
volumes:
- /var/run/:/host/var/run/
- ./crypto-config/peerOrganizations/org1.hf.chainhero.io/peers/peer0.org1.hf.chainhero.io/msp:/var/hyperledger/msp
- ./crypto-config/peerOrganizations/org1.hf.chainhero.io/peers/peer0.org1.hf.chainhero.io/tls:/var/hyperledger/tls
ports:
- 7051:7051
- 7053:7053
depends_on:
- orderer.hf.chainhero.io
links:
- orderer.hf.chainhero.io
networks:
default:
aliases:
- peer0.org1.hf.chainhero.io
peer1.org1.hf.chainhero.io:
image: hyperledger/fabric-peer:x86_64-1.1.0
container_name: peer1.org1.hf.chainhero.io
environment:
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
- CORE_VM_DOCKER_ATTACHSTDOUT=true
- CORE_LOGGING_LEVEL=DEBUG
- CORE_PEER_NETWORKID=chainhero
- CORE_PEER_PROFILE_ENABLED=true
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_TLS_CERT_FILE=/var/hyperledger/tls/server.crt
- CORE_PEER_TLS_KEY_FILE=/var/hyperledger/tls/server.key
- CORE_PEER_TLS_ROOTCERT_FILE=/var/hyperledger/tls/ca.crt
- CORE_PEER_ID=peer1.org1.hf.chainhero.io
- CORE_PEER_ADDRESSAUTODETECT=true
- CORE_PEER_ADDRESS=peer1.org1.hf.chainhero.io:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org1.hf.chainhero.io:7051
- CORE_PEER_GOSSIP_USELEADERELECTION=true
- CORE_PEER_GOSSIP_ORGLEADER=false
- CORE_PEER_GOSSIP_SKIPHANDSHAKE=true
- CORE_PEER_LOCALMSPID=org1.hf.chainhero.io
- CORE_PEER_MSPCONFIGPATH=/var/hyperledger/msp
- CORE_PEER_TLS_SERVERHOSTOVERRIDE=peer1.org1.hf.chainhero.io
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
command: peer node start
volumes:
- /var/run/:/host/var/run/
- ./crypto-config/peerOrganizations/org1.hf.chainhero.io/peers/peer1.org1.hf.chainhero.io/msp:/var/hyperledger/msp
- ./crypto-config/peerOrganizations/org1.hf.chainhero.io/peers/peer1.org1.hf.chainhero.io/tls:/var/hyperledger/tls
ports:
- 8051:7051
- 8053:7053
depends_on:
- orderer.hf.chainhero.io
links:
- orderer.hf.chainhero.io
networks:
default:
aliases:
- peer1.org1.hf.chainhero.io
docker-compose-couch.yaml
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
version: '2'
services:
couchdb0:
container_name: couchdb0
image: hyperledger/fabric-couchdb
environment:
- COUCHDB_USER=
- COUCHDB_PASSWORD=
# Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service,
# for example map it to utilize Fauxton User Interface in dev environments.
ports:
- "5984:5984"
peer0.org1.hf.chainhero.io:
environment:
- CORE_LEDGER_STATE_STATEDATABASE=CouchDB
- CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb0:7051
- CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
- CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
depends_on:
- couchdb0
couchdb1:
container_name: couchdb1
image: hyperledger/fabric-couchdb
environment:
- COUCHDB_USER=
- COUCHDB_PASSWORD=
# Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service,
# for example map it to utilize Fauxton User Interface in dev environments.
ports:
- "6984:5984"
peer1.org1.hf.chainhero.io:
environment:
- CORE_LEDGER_STATE_STATEDATABASE=CouchDB
- CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb1:5984
- CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
- CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
depends_on:
- couchdb1
couchdb2:
container_name: couchdb2
image: hyperledger/fabric-couchdb
environment:
- COUCHDB_USER=
- COUCHDB_PASSWORD=
# Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service,
# for example map it to utilize Fauxton User Interface in dev environments.
ports:
- "7984:5984"
# peer0.org2.example.com:
# environment:
# - CORE_LEDGER_STATE_STATEDATABASE=CouchDB
# - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb2:5984
# - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
# - CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
# depends_on:
# - couchdb2
#
# couchdb3:
# container_name: couchdb3
# image: hyperledger/fabric-couchdb
# environment:
# - COUCHDB_USER=
# - COUCHDB_PASSWORD=
# # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service,
# # for example map it to utilize Fauxton User Interface in dev environments.
# ports:
# - "8984:5984"
#
# peer1.org2.example.com:
# environment:
# - CORE_LEDGER_STATE_STATEDATABASE=CouchDB
# - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb3:5984
# - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
# - CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
# depends_on:
# - couchdb3
As assumed, it was a problem with the ports in docker-compose-couch.yaml, where I needed to change:
peer0.org1.hf.chainhero.io:
...
- CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb0:7051
to
peer0.org1.hf.chainhero.io:
...
- CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb0:5984
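A quick way to sanity-check the corrected mapping (a hedged sketch; 5984 is CouchDB's default port):
# CouchDB answers with a JSON welcome message on its root endpoint
curl http://localhost:5984/
# and the peer log should show a successful CouchDB connection rather than retries
docker logs peer0.org1.hf.chainhero.io 2>&1 | grep -i couch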

Jenkins installation automation

Old Question
Is it possible to automate a Jenkins installation (Jenkins binaries, plugins, credentials) by using a configuration management tool such as Ansible?
Edited
After this question was asked, I learned about and found many ways to automate a Jenkins installation. I found docker-compose an interesting way to achieve Jenkins installation automation. So my question is: is there a better way to automate a Jenkins installation than what I am doing, and is there any risk in the way I am handling this automation?
I have taken advantage of the Jenkins Docker image and did the automation with docker-compose.
Dockerfile
FROM jenkinsci/blueocean
RUN jenkins-plugin-cli --plugins kubernetes workflow-aggregator git configuration-as-code blueocean matrix-auth
docker-compose.yaml
version: '3.7'
services:
dind:
image: docker:dind
privileged: true
networks:
jenkins:
aliases:
- docker
expose:
- "2376"
environment:
- DOCKER_TLS_CERTDIR=/certs
volumes:
- type: volume
source: jenkins-home
target: /var/jenkins_home
- type: volume
source: jenkins-docker-certs
target: /certs/client
jcac:
image: nginx:latest
volumes:
- type: bind
source: ./jcac.yml
target: /usr/share/nginx/html/jcac.yml
networks:
- jenkins
jenkins:
build: .
ports:
- "8080:8080"
- "50000:50000"
environment:
- DOCKER_HOST=tcp://docker:2376
- DOCKER_CERT_PATH=/certs/client
- DOCKER_TLS_VERIFY=1
- JAVA_OPTS="-Djenkins.install.runSetupWizard=false"
- CASC_JENKINS_CONFIG=http://jcac/jcac.yml
- GITHUB_ACCESS_TOKEN=${GITHUB_ACCESS_TOKEN:-fake}
- GITHUB_USERNAME=${GITHUB_USERNAME:-fake}
volumes:
- type: volume
source: jenkins-home
target: /var/jenkins_home
- type: volume
source: jenkins-docker-certs
target: /certs/client
read_only: true
networks:
- jenkins
volumes:
jenkins-home:
jenkins-docker-certs:
networks:
jenkins:
jcac.yml
credentials:
system:
domainCredentials:
- credentials:
- usernamePassword:
id: "github"
password: ${GITHUB_PASSWORD:-fake}
scope: GLOBAL
username: ${GITHUB_USERNAME:-fake}
- usernamePassword:
id: "slave"
password: ${SSH_PASSWORD:-fake}
username: ${SSH_USERNAME:-fake}
jenkins:
globalNodeProperties:
- envVars:
env:
- key: "BRANCH"
value: "hello"
systemMessage: "Welcome to (one click) Jenkins Automation!"
agentProtocols:
- "JNLP4-connect"
- "Ping"
crumbIssuer:
standard:
excludeClientIPFromCrumb: true
disableRememberMe: false
markupFormatter: "plainText"
mode: NORMAL
myViewsTabBar: "standard"
numExecutors: 4
# nodes:
# - permanent:
# labelString: "slave01"
# launcher:
# ssh:
# credentialsId: "slave"
# host: "worker"
# port: 22
# sshHostKeyVerificationStrategy: "nonVerifyingKeyVerificationStrategy"
# name: "slave01"
# nodeDescription: "SSH Slave 01"
# numExecutors: 3
# remoteFS: "/home/jenkins/workspace"
# retentionStrategy: "always"
securityRealm:
local:
allowsSignup: false
enableCaptcha: false
users:
- id: "admin"
password: "${ADMIN_PASSWORD:-admin123}" #
- id: "user"
password: "${DEFAULTUSER_PASSWORD:-user123}"
authorizationStrategy:
globalMatrix:
permissions:
- "Agent/Build:user"
- "Job/Build:user"
- "Job/Cancel:user"
- "Job/Read:user"
- "Overall/Read:user"
- "View/Read:user"
- "Overall/Read:anonymous"
- "Overall/Administer:admin"
- "Overall/Administer:root"
unclassified:
globalLibraries:
libraries:
- defaultVersion: "master"
implicit: false
name: "jenkins-shared-library"
retriever:
modernSCM:
scm:
git:
remote: "https://github.com/samitkumarpatel/jenkins-shared-libs.git"
traits:
- "gitBranchDiscovery"
The commands to start and stop Jenkins are:
# start Jenkins
docker-compose up -d
# stop Jenkins
docker-compose down
Sure it is :) For Ansible, you can always check Ansible Galaxy whenever you want to automate the installation of something. Here is the most popular role for installing Jenkins, and here is its GitHub repo.
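As a minimal sketch (assuming the geerlingguy.jenkins role, which is what that Galaxy link typically points to):
ansible-galaxy install geerlingguy.jenkins
# playbook.yml
- hosts: jenkins
  become: true
  roles:
    - geerlingguy.jenkins
Run it with ansible-playbook -i inventory playbook.yml.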
