I have a Linux machine (Host-591) hosting 2 VM instances. In the first VM instance (Docker03), I have a Flask web server running inside a container; it listens on tcp/81 and is published. The VM, Docker03, maps it to port tcp/5003.
From Docker03, I am able to access the Flask server:
root#Docker03:/home/ubuntu/docker/app3# curl http://192.168.122.103:5003/
Hello Root!
root#Docker03:/home/ubuntu/docker/app3#
But from the host linux machine, I am not able to access the server.
[root#Host-591 ~]# curl http://192.168.122.103:5003/
^C
Ping from the host machine to the server in the container inside Docker03 works fine.
[root#Host-591 ~]# ping 192.168.122.103 -c 1
PING 192.168.122.103 (192.168.122.103) 56(84) bytes of data.
64 bytes from 192.168.122.103: icmp_seq=1 ttl=64 time=0.225 ms
--- 192.168.122.103 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.225/0.225/0.225/0.000 ms
A simple telnet from the Host machine to the Flask server's port shows TCP connection can be established:
[root#Host-591 ~]# telnet 192.168.122.103 5003
Trying 192.168.122.103...
Connected to 192.168.122.103.
Escape character is '^]'.
^]
telnet> q
Connection closed.
[root#Host-591 ~]# ifconfig virbr0
virbr0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255
ether 52:54:00:1a:d4:4d txqueuelen 1000 (Ethernet)
RX packets 30436 bytes 7466531 (7.1 MiB)
RX errors 0 dropped 24 overruns 0 frame 0
TX packets 42414 bytes 65991140 (62.9 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
The iptables rules on Docker03 VM instance looks like:
root#Docker03:/home/ubuntu/docker/app3# iptables -t nat -vL -n
Chain PREROUTING (policy ACCEPT 1 packets, 84 bytes)
pkts bytes target prot opt in out source destination
1 84 DOCKER-INGRESS all -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL
3 204 DOCKER all -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL
Chain INPUT (policy ACCEPT 1 packets, 84 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 5 packets, 364 bytes)
pkts bytes target prot opt in out source destination
4 240 DOCKER-INGRESS all -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL
0 0 DOCKER all -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL
Chain POSTROUTING (policy ACCEPT 5 packets, 364 bytes)
pkts bytes target prot opt in out source destination
4 240 MASQUERADE all -- * docker_gwbridge 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match src-type LOCAL
0 0 MASQUERADE all -- * !docker_gwbridge 172.18.0.0/16 0.0.0.0/0
9 582 MASQUERADE all -- * !docker0 172.17.0.0/16 0.0.0.0/0
Chain DOCKER (2 references)
pkts bytes target prot opt in out source destination
0 0 RETURN all -- docker_gwbridge * 0.0.0.0/0 0.0.0.0/0
0 0 RETURN all -- docker0 * 0.0.0.0/0 0.0.0.0/0
Chain DOCKER-INGRESS (2 references)
pkts bytes target prot opt in out source destination
3 180 DNAT tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:5003 to:172.18.0.2:5003
1 84 RETURN all -- * * 0.0.0.0/0 0.0.0.0/0
In the DOCKER-INGRESS chain, the pkt count increases when I try to access the server from Docker03 instance, but does not increase when I try to access it from the host machine (Host-591).
Inside Docker03:
root#Docker03:/home/ubuntu/docker/app3# ifconfig
...
docker_gwbridge Link encap:Ethernet HWaddr 02:42:a5:66:fb:c6
inet addr:172.18.0.1 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:a5ff:fe66:fbc6/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:55 errors:0 dropped:0 overruns:0 frame:0
TX packets:48 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:3768 (3.7 KB) TX bytes:3560 (3.5 KB)
ens3 Link encap:Ethernet HWaddr 52:54:00:4d:a9:67
inet addr:192.168.122.103 Bcast:192.168.122.255 Mask:255.255.255.0
inet6 addr: fe80::5054:ff:fe4d:a967/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:16582 errors:0 dropped:8 overruns:0 frame:0
TX packets:7988 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:27505482 (27.5 MB) TX bytes:773745 (773.7 KB)
...
root#Docker03:/home/ubuntu/docker/app3# docker service ls
ID NAME MODE REPLICAS IMAGE
jgsuip3oda4e app3_web replicated 1/1 app3-web:v1
root#Docker03:/home/ubuntu/docker/app3# docker service inspect jgsuip3oda4e
[
{
"ID": "jgsuip3oda4ef2soefj0ce2oh",
"Version": {
"Index": 26
},
"CreatedAt": "2018-08-20T16:13:40.627151395Z",
"UpdatedAt": "2018-08-20T16:13:40.628064367Z",
"Spec": {
"Name": "app3_web",
"Labels": {
"com.docker.stack.namespace": "app3"
},
"TaskTemplate": {
"ContainerSpec": {
"Image": "app3-web:v1",
"Labels": {
"com.docker.stack.namespace": "app3"
}
},
"Resources": {},
"Placement": {},
"ForceUpdate": 0
},
"Mode": {
"Replicated": {
"Replicas": 1
}
},
"Networks": [
{
"Target": "giz5m1weca0xjlcsxjnvm5e81",
"Aliases": [
"web"
]
}
],
"EndpointSpec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 81,
"PublishedPort": 5003,
"PublishMode": "ingress"
}
]
}
},
"Endpoint": {
"Spec": {
"Mode": "vip",
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 81,
"PublishedPort": 5003,
"PublishMode": "ingress"
}
]
},
"Ports": [
{
"Protocol": "tcp",
"TargetPort": 81,
"PublishedPort": 5003,
"PublishMode": "ingress"
}
],
"VirtualIPs": [
{
"NetworkID": "s067fap1788lt9le1nfc5l2yh",
"Addr": "10.255.0.3/16"
},
{
"NetworkID": "giz5m1weca0xjlcsxjnvm5e81",
"Addr": "10.0.0.2/24"
}
]
},
"UpdateStatus": {
"StartedAt": "0001-01-01T00:00:00Z",
"CompletedAt": "0001-01-01T00:00:00Z"
}
}
]
root#Docker03:/home/ubuntu/docker/app3# docker network ls
NETWORK ID NAME DRIVER SCOPE
giz5m1weca0x app3_webnet overlay swarm
a2a6a0d8d2eb bridge bridge local
3d5bf5444e12 docker_gwbridge bridge local
97d487b3203e host host local
s067fap1788l ingress overlay swarm
efb9d06c92a8 none null local
root#Docker03:/home/ubuntu/docker/app3# docker network inspect docker_gwbridge
[
{
"Name": "docker_gwbridge",
"Id": "3d5bf5444e12adb0d8ed307144de2047372b5f56b2dead9718b414c8e6afa75b",
"Created": "2018-08-20T12:04:26.440509262-04:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.18.0.0/16",
"Gateway": "172.18.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Containers": {
"9eb13ae864ef07243c9b6c89713680248db9ba8e4e914e2f0173923c38d87d6f": {
"Name": "gateway_9eb13ae864ef",
"EndpointID": "48e44bfe94366c783f8bc59d1ed1bc3b8cefbbe534cdb4bf7cedfc4852b91213",
"MacAddress": "02:42:ac:12:00:03",
"IPv4Address": "172.18.0.3/16",
"IPv6Address": ""
},
"ingress-sbox": {
"Name": "gateway_ingress-sbox",
"EndpointID": "a9e15a62d6a678b2beb078f2eb99933c48ce44ebf4d2cc2912090ef75a12b75d",
"MacAddress": "02:42:ac:12:00:02",
"IPv4Address": "172.18.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.enable_icc": "false",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.name": "docker_gwbridge"
},
"Labels": {}
}
]
root#Docker03:/home/ubuntu/docker/app3# docker network inspect app3_webnet
[
{
"Name": "app3_webnet",
"Id": "giz5m1weca0xjlcsxjnvm5e81",
"Created": "2018-08-20T12:13:40.787096192-04:00",
"Scope": "swarm",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "10.0.0.0/24",
"Gateway": "10.0.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Containers": {
"9eb13ae864ef07243c9b6c89713680248db9ba8e4e914e2f0173923c38d87d6f": {
"Name": "app3_web.1.8cejzgd75xul8brdjjjjnq0rb",
"EndpointID": "b5717c1dff888d993ff9a573b7967f90165c35e35774ca479b5d37cf0821e00d",
"MacAddress": "02:42:0a:00:00:03",
"IPv4Address": "10.0.0.3/24",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.driver.overlay.vxlanid_list": "4097"
},
"Labels": {
"com.docker.stack.namespace": "app3"
},
"Peers": [
{
"Name": "Docker03-03ead807e067",
"IP": "192.168.122.103"
}
]
}
]
Here is the docker compose file that I am using:
root#Docker03:/home/ubuntu/docker/app3# cat docker-compose.yml
version: '3'
services:
web:
image: "app3-web:v1"
ports:
- "5003:81"
networks:
- "webnet"
networks:
webnet:
root#Docker03:/home/ubuntu/docker/app3# netstat -tulpn | grep 5003
tcp6 0 0 :::5003 :::* LISTEN 1610/dockerd
The Dockerfile for the app looks like this:
root#Docker03:/home/ubuntu/docker/app3# cat web/Dockerfile
FROM python:3.4-alpine
ADD . /web
WORKDIR /web
RUN pip install --proxy <proxy_ip_address:port> --trusted-host pypi.python.org -r requirements.txt
EXPOSE 81
# set environment variable so that python does not buffer any output logs
ENV PYTHONUNBUFFERED 0
CMD ["python", "index.py"]
I tried to spin up another VM instance connected to the 192.168.122.0/24 network and access the Flask server, and it worked fine. It seems only access from outside the VMs is not working. Docker version I am using:
root#Docker03:/home/ubuntu/docker/app3# docker --version
Docker version 17.03.2-ce, build f5ec1e2
Any help is much appreciated.
Thanks,
Related
I know that the CPU utilization of the container can be obtained by docker stats:
#docker stats
CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS
05076af468cd mystifying_kepler 0.02% 10.5MiB / 5.712GiB 0.18% 656B / 0B 0B / 0B 1
And I want to get this data through the HTTP API.
The data I get from this HTTP API is:
{
"read": "2019-11-26T22:18:33.027963669Z",
"preread": "2019-11-26T22:18:32.013978454Z",
"pids_stats": {
"current": 1
},
"blkio_stats": {
"io_service_bytes_recursive": [],
"io_serviced_recursive": [],
"io_queue_recursive": [],
"io_service_time_recursive": [],
"io_wait_time_recursive": [],
"io_merged_recursive": [],
"io_time_recursive": [],
"sectors_recursive": []
},
"num_procs": 0,
"storage_stats": {},
"cpu_stats": {
"cpu_usage": {
"total_usage": 361652820,
"percpu_usage": [361652820],
"usage_in_kernelmode": 50000000,
"usage_in_usermode": 100000000
},
"system_cpu_usage": 144599100000000,
"online_cpus": 1,
"throttling_data": {
"periods": 0,
"throttled_periods": 0,
"throttled_time": 0
}
},
"precpu_stats": {
"cpu_usage": {
"total_usage": 361488978,
"percpu_usage": [361488978],
"usage_in_kernelmode": 50000000,
"usage_in_usermode": 100000000
},
"system_cpu_usage": 144598090000000,
"online_cpus": 1,
"throttling_data": {
"periods": 0,
"throttled_periods": 0,
"throttled_time": 0
}
},
"memory_stats": {
"usage": 11005952,
"max_usage": 11108352,
"stats": {
"active_anon": 11005952,
"active_file": 0,
"cache": 0,
"dirty": 0,
"hierarchical_memory_limit": 9223372036854771712,
"hierarchical_memsw_limit": 9223372036854771712,
"inactive_anon": 0,
"inactive_file": 0,
"mapped_file": 0,
"pgfault": 8151,
"pgmajfault": 0,
"pgpgin": 4137,
"pgpgout": 1450,
"rss": 11005952,
"rss_huge": 0,
"total_active_anon": 11005952,
"total_active_file": 0,
"total_cache": 0,
"total_dirty": 0,
"total_inactive_anon": 0,
"total_inactive_file": 0,
"total_mapped_file": 0,
"total_pgfault": 8151,
"total_pgmajfault": 0,
"total_pgpgin": 4137,
"total_pgpgout": 1450,
"total_rss": 11005952,
"total_rss_huge": 0,
"total_unevictable": 0,
"total_writeback": 0,
"unevictable": 0,
"writeback": 0
},
"limit": 6133108736
},
"name": "/mystifying_kepler",
"id": "05076af468cdeb3d15d147a25e8ccee5f4d029ffcba1d60f14f84e2c9e25d6a9",
"networks": {
"eth0": {
"rx_bytes": 656,
"rx_packets": 8,
"rx_errors": 0,
"rx_dropped": 0,
"tx_bytes": 0,
"tx_packets": 0,
"tx_errors": 0,
"tx_dropped": 0
}
}
}
I was able to calculate the memory utilization from the data, but I didn't find a way to get the CPU utilization.
Any ideas?
You've probably solved this by now, but for the next person... This example is in Python, but the data fields and math are the same if you're making API calls.
The API returns cumulative values, so you need more than one sample - do the math using the difference between samples to get the utilization for that period. This example uses the streaming mode, which pushes an update every second.
# These initial values will seed the "last" cycle's saved values
containerCPU = 0
systemCPU = 0
container = client.containers.get(containerID)
#This function is blocking; the loop will proceed when there's a new update to iterate
for stats in container.stats(decode=True):
#Save the values from the last sample
lastContainerCPU = containerCPU
lastSystemCPU = systemCPU
#Get the container's usage, the total system capacity, and the number of CPUs
#The math returns a Linux-style %util, where 100.0 = 1 CPU core fully used
containerCPU = stats.get('cpu_stats',{}).get('cpu_usage',{}).get('total_usage')
systemCPU = stats.get('cpu_stats',{}).get('system_cpu_usage')
numCPU = len(stats.get('cpu_stats',{}).get('cpu_usage',{}).get('percpu_usage',0))
# Skip the first sample (result will be wrong because the saved values are 0)
if lastContainerCPU and lastSystemCPU:
cpuUtil = (containerCPU - lastContainerCPU) / (systemCPU - lastSystemCPU)
cpuUtil = cpuUtil * numCPU * 100
print(cpuUtil)
Trying to set up clustering with Docker Compose.
I have two elasticsearch docker containers which are deployed in different Docker Hosts.
docker version: 18.06.3-ce
elasticsearch : 6.5.2
docker-compose.yml for docker-container-1
services:
elasticsearch:
restart: always
hostname: elasticsearch
image: docker-elk/elasticsearch:1.0.0
build:
context: elasticsearch
dockerfile: Dockerfile
environment:
discovery.type: zen
ports:
- 9200:9200
- 9300:9300
env_file:
- ./elasticsearch/elasticsearch.env
volumes:
- elasticsearch_data:/usr/share/elasticsearch/data
docker-compose.yml for docker-container-2
services:
elasticsearch:
restart: always
hostname: elasticsearch
image: docker-elk/elasticsearch:1.0.0
build:
context: elasticsearch
dockerfile: Dockerfile
environment:
discovery.type: zen
ports:
- 9200:9200
- 9300:9300
env_file:
- ./elasticsearch/elasticsearch.env
volumes:
- elasticsearch_data:/usr/share/elasticsearch/data
elasticsearch.yml on the elasticsearch-docker-container-1 on the Docker-Host 1
xpack.security.enabled: true
cluster.name: es-cluster
node.name: es1
network.host: 0.0.0.0
node.master: true
node.data: true
transport.tcp.port: 9300
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
discovery.zen.minimum_master_nodes: 2
gateway.recover_after_nodes: 1
discovery.zen.ping.unicast.hosts: ["host1:9300", "host2:9300","host1:9200", "host2:9200"]
network.publish_host: host1
elasticsearch.yml on the elasticsearch-docker-container-2 on the Docker-Host 2
xpack.security.enabled: true
cluster.name: es-cluster
node.name: es2
network.host: 0.0.0.0
node.master: true
node.data: true
transport.tcp.port: 9300
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
discovery.zen.minimum_master_nodes: 2
gateway.recover_after_nodes: 1
discovery.zen.ping.unicast.hosts: ["host1:9300", "host2:9300","host1:9200", "host2:9200"]
network.publish_host: host2
Below is the result of GET /_cluster/health?pretty and it shows that there is only one node.
{
"cluster_name" : "dps_geocluster",
"status" : "yellow",
"timed_out" : false,
"number_of_nodes" : 1,
"number_of_data_nodes" : 1,
"active_primary_shards" : 33,
"active_shards" : 33,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 30,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 52.38095238095239
}
According to the document below at least three elasticsearch nodes are required.
https://www.elastic.co/guide/en/elasticsearch/reference/6.5/modules-node.html
Should each Elasticsearch container be on a different Docker host?
The following was the cause of the error. After increasing the value of vm.max_map_count to 262144 with sysctl, it works fine.
elasticsearch_1 | [1]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
Now number_of_nodes is 2.
{
"cluster_name" : "es-cluster",
"status" : "yellow",
"timed_out" : false,
"number_of_nodes" : 2,
"number_of_data_nodes" : 2,
"active_primary_shards" : 35,
"active_shards" : 37,
"relocating_shards" : 0,
"initializing_shards" : 2,
"unassigned_shards" : 31,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 52.85714285714286
}
I am currently working with Highcharts in combination with the pattern fill module. When I set a pattern for a series in the chart, the pattern is shown but it has a transparent background. I need to set an additional background because the pattern is overlapping with another series which I don't want to see behind it. You can check this fiddle. So basically I don't want to see those three columns on the left behind the pattern. Any ideas how I can do that? I haven't seen any options to set an additional background, but maybe you know some trick. This is the code I am using for the pattern:
"color": {
"pattern": {
"path": {
"d": "M 0 0 L 10 10 M 9 -1 L 11 1 M -1 9 L 1 11"
},
"width": 10,
"height": 10,
"opacity": 1,
"color": "rgb(84,198,232)"
}
}
You need to set fill attribute as a path property:
"color": {
"pattern": {
"path": {
"d": "M 0 0 L 10 10 M 9 -1 L 11 1 M -1 9 L 1 11",
fill: 'red'
},
"width": 10,
"height": 10,
"opacity": 1,
"color": 'rgb(84,198,232)'
}
}
Live demo: https://jsfiddle.net/BlackLabel/m9rxwej5/
I guess there's been an update. backgroundColor should be set at pattern's root level:
"color": {
"pattern": {
"backgroundColor": 'red',
"path": {
"d": "M 0 0 L 10 10 M 9 -1 L 11 1 M -1 9 L 1 11",
},
"width": 10,
"height": 10,
"opacity": 1,
"color": 'rgb(84,198,232)',
}
}
https://jsfiddle.net/vL4fqhao/
In Maxima, we have matrix_element_add, matrix_element_mult and matrix_element_transpose.
Is there a matrix_element_inv, and if not, how could I make one?
If you want to invert a matrix, first remember that not all matrices can be inverted, so make sure that your matrix is invertible.
In Maxima, the operator for matrix multiplication is `.` (a dot),
so A . A = A^2,
and if we want to compute this value we use A^^2.
Normally an operator applies to each element of the matrix, so if you were to invert all the elements individually:
(%i1) A: matrix ([17, 3], [-8, 11]);
[ 17 3 ]
(%o1) [ ]
[ - 8 11 ]
(%i9) A^-1;
[ 1 1 ]
[ -- - ]
[ 17 3 ]
(%o9) [ ]
[ 1 1 ]
[ - - -- ]
[ 8 11 ]
then to get the inverse of a matrix:
(%i2) B: A^^-1;
[ 11 3 ]
[ --- - --- ]
[ 211 211 ]
(%o2) [ ]
[ 8 17 ]
[ --- --- ]
[ 211 211 ]
(%i4) B.A;
[ 1 0 ]
(%o4) [ ]
[ 0 1 ]
(%i5) A.B;
[ 1 0 ]
(%o5) [ ]
[ 0 1 ]
be sure that your matrix is invertible:
(%i6) Bad: matrix ([2, 3], [4, 6]);
[ 2 3 ]
(%o6) [ ]
[ 4 6 ]
(%i7) Bad^^-1;
expt: undefined: 0 to a negative exponent.
-- an error. To debug this try: debugmode(true);
(%i8) newdet(Bad);
(%o8)/R/ 0
Now you should read carefully this section:
http://maxima.sourceforge.net/docs/manual/maxima_23.html
specially when telling about
matrix_element_add
So really there are only these operators — a matrix_element_inv does not exist.
However, you can write your own using lambda functions. For example, to get the transpose of all the inverted elements:
(%i10) matrix_element_transpose: lambda ([x], x^-1)$
(%i11) transpose(A);
[ 1 1 ]
[ -- - - ]
[ 17 8 ]
(%o11) [ ]
[ 1 1 ]
[ - -- ]
[ 3 11 ]
hope this helps
I'm installing Sentry with Docker according with instruction.
I ran Sentry with the -p 8080:9000 option:
docker run -d -p 8080:9000 --name my-sentry -e SENTRY_SECRET_KEY='<secret-key>' --link sentry-redis:redis --link sentry-postgres:postgres sentry
And I have next output of ifconfig:
$ ifconfig
docker0 Link encap:Ethernet HWaddr 02:42:0b:b2:d5:41
inet addr:172.17.0.1 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:bff:feb2:d541/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:123 errors:0 dropped:0 overruns:0 frame:0
TX packets:78 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:6960 (6.9 KB) TX bytes:8622 (8.6 KB)
enp0s3 Link encap:Ethernet HWaddr 08:00:27:6d:a0:12
inet addr:192.168.1.207 Bcast:192.168.1.255 Mask:255.255.255.0
inet6 addr: fe80::a00:27ff:fe6d:a012/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:2609 errors:0 dropped:0 overruns:0 frame:0
TX packets:1809 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:260348 (260.3 KB) TX bytes:279424 (279.4 KB)
...
So, I trying to open url http://192.168.1.207:8080
Browser has redirected me to page http://192.168.1.207:8080/auth/login/ with error ERR_EMPTY_RESPONSE.
What is wrong? I'm new to Docker. How do I view the logs where an error message might appear?
UPDATE
Found way to show logs
$ docker logs my-sentry
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/raven/middleware.py", line 98, in __call__
iterable = self.application(environ, start_response)
File "/usr/local/lib/python2.7/site-packages/sentry/wsgi.py", line 41, in __call__
response = super(FileWrapperWSGIHandler, self).__call__(environ, start_response)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/core/handlers/wsgi.py", line 206, in __call__
response = self.get_response(request)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/core/handlers/base.py", line 194, in get_response
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
File "/usr/local/lib/python2.7/site-packages/sentry/../django/core/handlers/base.py", line 236, in handle_uncaught_exception
return callback(request, **param_dict)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/views/generic/base.py", line 69, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/web/frontend/error_500.py", line 59, in dispatch
return HttpResponseServerError(t.render(Context(context)))
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/base.py", line 140, in render
return self._render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/test/utils.py", line 85, in instrumented_test_render
return self.nodelist.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/base.py", line 840, in render
bit = self.render_node(node, context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/debug.py", line 78, in render_node
return node.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/loader_tags.py", line 123, in render
return compiled_parent._render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/test/utils.py", line 85, in instrumented_test_render
return self.nodelist.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/base.py", line 840, in render
bit = self.render_node(node, context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/debug.py", line 78, in render_node
return node.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/loader_tags.py", line 123, in render
return compiled_parent._render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/test/utils.py", line 85, in instrumented_test_render
return self.nodelist.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/base.py", line 840, in render
bit = self.render_node(node, context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/debug.py", line 78, in render_node
return node.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/loader_tags.py", line 62, in render
result = block.nodelist.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/base.py", line 840, in render
bit = self.render_node(node, context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/debug.py", line 78, in render_node
return node.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/loader_tags.py", line 155, in render
return self.render_template(self.template, context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/loader_tags.py", line 137, in render_template
output = template.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/base.py", line 140, in render
return self._render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/test/utils.py", line 85, in instrumented_test_render
return self.nodelist.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/base.py", line 840, in render
bit = self.render_node(node, context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/debug.py", line 78, in render_node
return node.render(context)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/template/base.py", line 1125, in render
return func(*resolved_args, **resolved_kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/templatetags/sentry_dsn.py", line 30, in public_dsn
key = _get_project_key(project_id)
File "/usr/local/lib/python2.7/site-packages/sentry/templatetags/sentry_dsn.py", line 18, in _get_project_key
)[0]
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/models/query.py", line 132, in __getitem__
return list(qs)[0]
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/models/query.py", line 96, in __iter__
self._fetch_all()
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/models/query.py", line 857, in _fetch_all
self._result_cache = list(self.iterator())
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/models/query.py", line 220, in iterator
for row in compiler.results_iter():
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/models/sql/compiler.py", line 713, in results_iter
for rows in self.execute_sql(MULTI):
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/models/sql/compiler.py", line 785, in execute_sql
cursor = self.connection.cursor()
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/backends/__init__.py", line 162, in cursor
cursor = util.CursorWrapper(self._cursor(), self)
File "/usr/local/lib/python2.7/site-packages/sentry/db/postgres/decorators.py", line 42, in inner
return func(self, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/db/postgres/base.py", line 60, in _cursor
cursor = super(DatabaseWrapper, self)._cursor()
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/backends/__init__.py", line 132, in _cursor
self.ensure_connection()
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/backends/__init__.py", line 127, in ensure_connection
self.connect()
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/utils.py", line 99, in __exit__
six.reraise(dj_exc_type, dj_exc_value, traceback)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/backends/__init__.py", line 127, in ensure_connection
self.connect()
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/backends/__init__.py", line 115, in connect
self.connection = self.get_new_connection(conn_params)
File "/usr/local/lib/python2.7/site-packages/sentry/../django/db/backends/postgresql_psycopg2/base.py", line 115, in get_new_connection
return Database.connect(**conn_params)
File "/usr/local/lib/python2.7/site-packages/psycopg2/__init__.py", line 164, in connect
conn = _connect(dsn, connection_factory=connection_factory, async=async)
django.db.utils.OperationalError: FATAL: password authentication failed for user "postgres"
...
Found solution. I run docker container with environment variables described in sentry.conf.py, -e SENTRY_DB_USER=sentry -e SENTRY_DB_PASSWORD=secret:
docker run -p 8080:9000 -d --name my-sentry \
-e SENTRY_SECRET_KEY=$SENTRY_SECRET_KEY \
-e SENTRY_DB_USER=sentry \
-e SENTRY_DB_PASSWORD=secret \
--link sentry-redis:redis --link sentry-postgres:postgres sentry