Not able to see logs written to a custom location in Kubernetes (Docker)

I am using zap as the logger, and I am storing the logs in a custom location: /var/tmp/audit.log
Unfortunately, when I try to look at the logs in Kubernetes, I am not able to find them. Can anyone suggest what I am missing? Do I have to volume-mount the logs? I can find the logs if I SSH into the Kubernetes pod, but I can't find them when I run kubectl logs <pod-name>.
import (
	"io"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"gopkg.in/natefinch/lumberjack.v2"
)

var (
	Logger       *zap.Logger
	AuditLogging *zap.Logger
)

// WriteSyncer wraps an io.Writer, adding a no-op Sync so it satisfies zapcore.WriteSyncer.
type WriteSyncer struct {
	io.Writer
}

func (ws WriteSyncer) Sync() error {
	return nil
}
func InitLogging(mode string) {
	var cfg zap.Config
	var logName = "/var/tmp/abc.log"
	var slogName = "/var/tmp/audit.log"
	if mode == "production" {
		cfg = zap.NewProductionConfig()
		cfg.DisableCaller = true
	} else {
		cfg = zap.NewDevelopmentConfig()
		cfg.EncoderConfig.LevelKey = "level"
		cfg.EncoderConfig.NameKey = "name"
		cfg.EncoderConfig.MessageKey = "msg"
		cfg.EncoderConfig.CallerKey = "caller"
		cfg.EncoderConfig.StacktraceKey = "stacktrace"
	}
	cfg.Encoding = "json"
	cfg.EncoderConfig.TimeKey = "timestamp"
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	cfg.OutputPaths = []string{logName}
	sw := getWriteSyncer(logName)
	swSugar := getWriteSyncer(slogName)
	l, err := cfg.Build(SetOutput(sw, cfg))
	if err != nil {
		panic(err)
	}
	defer l.Sync()
	ls, err := cfg.Build(SetOutput(swSugar, cfg))
	if err != nil {
		panic(err)
	}
	defer ls.Sync()
	Logger = l
	AuditLogging = ls
}
// SetOutput replaces the existing Core with a new one that writes to the passed WriteSyncer.
func SetOutput(ws zapcore.WriteSyncer, conf zap.Config) zap.Option {
	var enc zapcore.Encoder
	switch conf.Encoding {
	case "json":
		enc = zapcore.NewJSONEncoder(conf.EncoderConfig)
	case "console":
		enc = zapcore.NewConsoleEncoder(conf.EncoderConfig)
	default:
		panic("unknown encoding")
	}
	return zap.WrapCore(func(core zapcore.Core) zapcore.Core {
		return zapcore.NewCore(enc, ws, conf.Level)
	})
}
func getWriteSyncer(logName string) zapcore.WriteSyncer {
	var ioWriter = &lumberjack.Logger{
		Filename:   logName,
		MaxSize:    10, // MB
		MaxBackups: 3,  // number of backups
		MaxAge:     28, // days
		LocalTime:  true,
		Compress:   false, // disabled by default
	}
	var sw = WriteSyncer{
		ioWriter,
	}
	return sw
}
Here is what my YAML looks like:
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    kompose.cmd: kompose convert -f docker-compose.yml
    kompose.version: 1.22.0 (955b78124)
  creationTimestamp: null
  labels:
    io.kompose.service: service
  name: service
spec:
  replicas: 1
  selector:
    matchLabels:
      io.kompose.service: service
  strategy: {}
  template:
    metadata:
      annotations:
        kompose.cmd: kompose convert -f docker-compose.yml
        kompose.version: 1.22.0 (955b78124)
      creationTimestamp: null
      labels:
        io.kompose.service: service
    spec:
      containers:
        - image: services
          imagePullPolicy: Never
          name: service
          ports:
            - containerPort: 5001
            - containerPort: 5002
          resources: {}
          volumeMounts:
            - name: varlog
              mountPath: /var/tmp
      restartPolicy: Always
      volumes:
        - name: varlog
          hostPath:
            path: /var/tmp
status: {}
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    kompose.cmd: kompose convert -f docker-compose.yml
    kompose.version: 1.22.0 (955b78124)
  creationTimestamp: null
  labels:
    io.kompose.service: service
  name: service
spec:
  ports:
    - name: "5001"
      port: 5001
      targetPort: 5001
    - name: "5002"
      port: 5002
      targetPort: 5002
  selector:
    io.kompose.service: service
status:
  loadBalancer: {}
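Note on what is happening here: kubectl logs only shows a container's stdout/stderr stream, so anything written to /var/tmp/audit.log inside the pod never reaches it; that is also why the file is visible when you exec or SSH into the pod. A minimal sketch of one common fix, assuming you want to keep the rotating file but also mirror every entry to stdout (this tee is a suggestion, not part of the original code; it needs "os" added to the imports):
func getWriteSyncer(logName string) zapcore.WriteSyncer {
	// Rotating file at the custom location, as before.
	fileWriter := &lumberjack.Logger{
		Filename:   logName,
		MaxSize:    10, // MB
		MaxBackups: 3,
		MaxAge:     28, // days
	}
	// Duplicate every write to stdout so `kubectl logs` sees it too.
	return zapcore.NewMultiWriteSyncer(
		zapcore.AddSync(fileWriter),
		zapcore.Lock(os.Stdout),
	)
}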

Related

How to connect go grpc server with dart grpc client using Envoy and Grpc_web

I'm new to grpc_web and Envoy. Please help me set up the following:
- A gRPC Go server running as a Docker container on an EC2 instance
- A Dart web client running on a local PC
- gRPC call requests going from the Dart web app to the gRPC Go server
- Envoy as the proxy forwarding the requests; it runs as a container on the same EC2 instance
I'm getting the following error: "Response: null, trailers: {access-control-allow-credentials: true, access-control-allow-origin: http://127.0.0.1:9000, vary: Origin})".
Grpc_Go:
package main

import (
	"context"
	"flag"
	"fmt"
	"log"
	"net"

	"google.golang.org/grpc"
	pb "google.golang.org/grpc/examples/helloworld/helloworld"
)

var (
	port = flag.Int("port", 50051, "The server port")
)

// server is used to implement helloworld.GreeterServer.
type server struct {
	pb.UnimplementedGreeterServer
}

// SayHello implements helloworld.GreeterServer
func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	log.Printf("Received: %v", in.GetName())
	return &pb.HelloReply{Message: "Hello " + in.GetName()}, nil
}

func (s *server) SayHelloAgain(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	return &pb.HelloReply{Message: "Hello again " + in.GetName()}, nil
}

func main() {
	flag.Parse()
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	pb.RegisterGreeterServer(s, &server{})
	log.Printf("server listening at %v", lis.Addr())
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
GRPC_dart_client:
import 'package:grpc/grpc_web.dart';
import 'package:grpc_web/app.dart';
import 'package:grpc_web/src/generated/echo.pbgrpc.dart';

void main() {
  final channel = GrpcWebClientChannel.xhr(Uri.parse('http://ec2-ip:8080'));
  final service = EchoServiceClient(channel);
  final app = EchoApp(service);
  final button = querySelector('#send') as ButtonElement;
  button.onClick.listen((e) async {
    final msg = querySelector('#msg') as TextInputElement;
    final value = msg.value!.trim();
    msg.value = '';
    if (value.isEmpty) return;
    if (value.indexOf(' ') > 0) {
      final countStr = value.substring(0, value.indexOf(' '));
      final count = int.tryParse(countStr);
      if (count != null) {
        app.repeatEcho(value.substring(value.indexOf(' ') + 1), count);
      } else {
        app.echo(value);
      }
    } else {
      app.echo(value);
    }
  });
}
envoy.yaml:
admin:
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }
static_resources:
  listeners:
    - name: listener_0
      address:
        socket_address: { address: 0.0.0.0, port_value: 8080 }
      filter_chains:
        - filters:
            - name: envoy.filters.network.http_connection_manager
              typed_config:
                "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                codec_type: auto
                stat_prefix: ingress_http
                route_config:
                  name: local_route
                  virtual_hosts:
                    - name: local_service
                      domains: ["*"]
                      routes:
                        - match: { prefix: "/" }
                          route:
                            cluster: echo_service
                            timeout: 0s
                            max_stream_duration:
                              grpc_timeout_header_max: 0s
                      cors:
                        allow_origin_string_match:
                          - prefix: "*"
                        allow_methods: GET, PUT, DELETE, POST, OPTIONS
                        allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,custom-header-1,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout
                        max_age: "1728000"
                        expose_headers: custom-header-1,grpc-status,grpc-message
                http_filters:
                  - name: envoy.filters.http.grpc_web
                  - name: envoy.filters.http.cors
                  - name: envoy.filters.http.router
  clusters:
    - name: echo_service
      connect_timeout: 0.25s
      type: logical_dns
      http2_protocol_options: {}
      lb_policy: round_robin
      load_assignment:
        cluster_name: cluster_0
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: app
                      port_value: 50051
Grpc_go_docker_file:
# Build stage. (The first FROM line appears to have been lost from the post;
# a Go Alpine builder image is assumed here.)
FROM golang:alpine AS builder

# Install git.
# Git is required for fetching the dependencies.
RUN apk update && apk add --no-cache git

WORKDIR /app

COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o main .

# Start a new stage from scratch
FROM alpine:latest
RUN apk --no-cache add ca-certificates

WORKDIR /root/

# Copy the pre-built binary file from the previous stage
COPY --from=builder /app/main .

# Expose port 50051 to the outside world
EXPOSE 50051

CMD ["./main"]
Envoy_Docker:
COPY envoy.yaml /etc/envoy/envoy.yaml
CMD /usr/local/bin/envoy -c /etc/envoy/envoy.yaml -l trace --log-path /tmp/envoy_info.log
I've been stuck on this for more than two days, please help me. Thanks in advance.
Thank you all for your replies.
I fixed this issue by using the IP of the EC2 instance.
clusters:
  - name: echo_service
    connect_timeout: 0.25s
    type: logical_dns
    http2_protocol_options: {}
    lb_policy: round_robin
    load_assignment:
      cluster_name: cluster_0
      endpoints:
        - lb_endpoints:
            - endpoint:
                address:
                  socket_address:
                    address: app
                    port_value: 50051
Instead of the container address (address: app, where app is the container name) in envoy.yaml, I used the IP of the EC2 instance plus the container port; now Envoy forwards the requests to the server.
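In other words, the cluster endpoint ends up looking roughly like this (<ec2-instance-ip> stands in for the actual instance IP):
                address:
                  socket_address:
                    address: <ec2-instance-ip>  # instead of the container name "app"
                    port_value: 50051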

DAPR middleware: OPA. Broken configuration

I have set up Dapr to run a sidecar on my application, and that works absolutely fine. I am now trying to get OPA injected as middleware in the Dapr call. This should be simple: set up the application annotation pointing at the config, the pipeline configuration, and the component:
Application annotation:
apiVersion: apps/v1
kind: Deployment
metadata:
  ...
spec:
  ...
  template:
    metadata:
      ...
      annotations:
        dapr.io/enabled: "true"
        dapr.io/app-id: "appname"
        dapr.io/app-port: "1003"
        dapr.io/config: "opa-pipeline"
Pipeline configuration:
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
  name: opa-pipeline
  namespace: default
spec:
  httpPipeline:
    handlers:
      - name: opa-component
        type: middleware.http.opa
Component:
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: opa-component
  namespace: default
spec:
  type: middleware.http.opa
  version: v1
  metadata:
    - name: defaultStatus
      value: 403
    - name: rego
      value: |
        package http

        default allow = true

        # Allow may also be an object and include other properties
        # For example, if you wanted to redirect on a policy failure, you could set the status code to 301 and set the location header on the response:
        allow = {
            "status_code": 301,
            "additional_headers": {
                "location": "https://my.site/authorize"
            }
        } {
            not jwt.payload["my-claim"]
        }

        # You can also allow the request and add additional headers to it:
        allow = {
            "allow": true,
            "additional_headers": {
                "x-my-claim": my_claim
            }
        } {
            my_claim := jwt.payload["my-claim"]
        }

        jwt = { "payload": payload } {
            auth_header := input.request.headers["authorization"]
            [_, jwt] := split(auth_header, " ")
            [_, payload, _] := io.jwt.decode(jwt)
        }
Can anyone tell me why I'm getting an error with this?
Error:
time="2021-08-19T15:32:20.3742084Z" level=info msg="enabled middleware.http.opa/ http middleware" app_id=gql instance=gql-deployment-dcccd9fcf-d7wjb scope=dapr.runtime type=log ver=1.3.0
time="2021-08-19T15:32:20.3742963Z" level=info msg="enabled gRPC tracing middleware" app_id=gql instance=gql-deployment-dcccd9fcf-d7wjb scope=dapr.runtime.grpc.api type=log ver=1.3.0
time="2021-08-19T15:32:20.3744374Z" level=info msg="enabled gRPC metrics middleware" app_id=gql instance=gql-deployment-dcccd9fcf-d7wjb scope=dapr.runtime.grpc.api type=log ver=1.3.0
time="2021-08-19T15:32:20.3745582Z" level=info msg="API gRPC server is running on port 50001" app_id=gql instance=gql-deployment-dcccd9fcf-d7wjb scope=dapr.runtime type=log ver=1.3.0
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x16ceb7a]
goroutine 1 [running]:
github.com/dapr/dapr/pkg/middleware/http.Pipeline.Apply(0xc000499118, 0x1, 0x1, 0xc000b084c0, 0x1)
/home/runner/work/dapr/dapr/pkg/middleware/http/http_pipeline.go:27 +0x3a
github.com/dapr/dapr/pkg/http.(*server).useComponents(...)
/home/runner/work/dapr/dapr/pkg/http/server.go:110
github.com/dapr/dapr/pkg/http.(*server).StartNonBlocking(0xc0005cc180)
/home/runner/work/dapr/dapr/pkg/http/server.go:64 +0x67
github.com/dapr/dapr/pkg/runtime.(*DaprRuntime).startHTTPServer(0xc000a78a00, 0xdac, 0x1e61, 0x3faf1b0, 0x1, 0xc000499118, 0x1, 0x1)
/home/runner/work/dapr/dapr/pkg/runtime/runtime.go:786 +0x512
github.com/dapr/dapr/pkg/runtime.(*DaprRuntime).initRuntime(0xc000a78a00, 0xc000312bb0, 0xa, 0xc0005bb180)
/home/runner/work/dapr/dapr/pkg/runtime/runtime.go:345 +0x63b
github.com/dapr/dapr/pkg/runtime.(*DaprRuntime).Run(0xc000a78a00, 0xc000abff40, 0x7, 0x7, 0x66, 0xc00009e000)
/home/runner/work/dapr/dapr/pkg/runtime/runtime.go:221 +0x25e
main.main()
/home/runner/work/dapr/dapr/cmd/daprd/main.go:151 +0x1126
There was an error in parsing the value of defaultStatus: https://github.com/dapr/dapr/issues/3216. It was fixed in Dapr v1.5.
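For anyone pinned to a runtime older than v1.5, one workaround sometimes suggested (an assumption on my part, not verified against the linked issue) is to quote defaultStatus so the metadata value is parsed as a string:
  metadata:
    - name: defaultStatus
      value: "403"  # quoted; the unquoted number is what tripped the parser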

How to start a docker container inside the setup?

I have created a DroneCI pipeline with the following content:
kind: pipeline
type: docker
name: Build auto git tagger

steps:
  - name: test and build
    image: golang
    commands:
      - go mod download
      - go test ./test
      - go build -o ./build/package ./cmd/git-tagger
  - name: Build docker image
    image: plugins/docker
    pull: if-not-exists
    settings:
      username:
      password:
      repo:
      dockerfile:
      registry:
      auto_tag:

trigger:
  branch:
    - master
The Go test starts a Gogs Docker container for testing purposes; here is the code:
// Assumed imports for this snippet (not shown in the original post);
// the API used below matches github.com/fsouza/go-dockerclient:
import (
	"context"
	"fmt"
	"net/url"

	docker "github.com/fsouza/go-dockerclient"
)

// stopContainer is assumed to be a cleanup callback: type stopContainer func() error

func createGogsContainer(dest, waitUrl string) (stopContainer, error) {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	gogs, err := client.CreateContainer(docker.CreateContainerOptions{
		Name: "repo",
		Config: &docker.Config{
			Image: "gogs/gogs",
		},
		HostConfig: &docker.HostConfig{
			PublishAllPorts: true,
			AutoRemove:      true,
			Mounts: []docker.HostMount{
				{
					Type:   "bind",
					Source: dest,
					Target: "/data",
				},
			},
			PortBindings: map[docker.Port][]docker.PortBinding{
				"3000/tcp": {{HostIP: "0.0.0.0", HostPort: "8888"}},
				"22/tcp":   {{HostIP: "0.0.0.0", HostPort: "2222"}},
			},
		},
		Context: ctx,
	})
	if err != nil {
		return nil, err
	}
	err = client.StartContainer(gogs.ID, nil)
	if err != nil {
		return nil, err
	}
	// Wait for connection
	host, err := url.Parse(waitUrl)
	if err != nil {
		return nil, err
	}
	err = waitHTTP(fmt.Sprintf("%s://%s", host.Scheme, host.Host), 3, 0)
	if err != nil {
		return nil, err
	}
	return func() error {
		return client.StopContainerWithContext(gogs.ID, 5, ctx)
	}, nil
}
The pipeline aborted with the following error message:
latest: Pulling from library/golang
Digest: sha256:f30b0d05ea7783131d84deea3b5f4d418d9d930dfa3668a9a5fa253d1f9dce5a
Status: Image is up to date for golang:latest
+ go mod download
+ go test ./test
time="2020-04-23T17:58:24Z" level=error msg="Get \"http://0.0.0.0:8888/gat/WithoutTag.git/info/refs?service=git-upload-pack\": dial tcp 0.0.0.0:8888: connect: connection refused"
time="2020-04-23T17:58:24Z" level=error msg="Get \"http://0.0.0.0:8888/gat/WithoutTag.git/info/refs?service=git-upload-pack\": dial tcp 0.0.0.0:8888: connect: connection refused"
What am I doing wrong?
Have a look at Drone services. It allows you to bring up a container as part of your pipeline and access its ports.
In your case you can bring up the Gogs container like this:
services:
  - name: gogs
    image: gogs/gogs
And then use it like this in your pipeline steps:
steps:
  - name: test and build
    image: golang
    commands:
      - curl "http://gogs"
      - ...
(This assumes the Gogs container listens on port 80; if it's a different port, you need to adjust the URI.)
Hint: the name of the service is the DNS name of the container.
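Concretely, since Gogs serves HTTP on port 3000 by default, the test from the question no longer needs to start its own container; it can simply wait on the service's DNS name (a sketch reusing the question's waitHTTP helper):
	// With the "gogs" service defined in the pipeline, Drone starts the
	// container; the test only waits until it answers over HTTP.
	err := waitHTTP("http://gogs:3000", 3, 0)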

Pulumi - how to pull a docker image from a private registry?

I've declared a Kubernetes deployment which has two containers. One is built locally; the other needs to be pulled from a private registry.
const appImage = new docker.Image("ledgerImage", {
    imageName: 'us.gcr.io/qwil-build/ledger',
    build: "../../",
});

const ledgerDeployment = new k8s.extensions.v1beta1.Deployment("ledger", {
    spec: {
        template: {
            metadata: {
                labels: {name: "ledger"},
                name: "ledger",
            },
            spec: {
                containers: [
                    {
                        name: "api",
                        image: appImage.imageName,
                    },
                    {
                        name: "ssl-proxy",
                        image: "us.gcr.io/qwil-build/monolith-ssl-proxy:latest",
                    }
                ],
            }
        }
    }
});
When I run pulumi up it hangs. This is happening because of a complaint that "You don't have the needed permissions to perform this operation, and you may have invalid credentials", which I see when I run kubectl describe <name of pod>. However, when I run docker pull us.gcr.io/qwil-build/monolith-ssl-proxy:latest it executes just fine. I've re-run gcloud auth configure-docker and it hasn't helped.
I found https://github.com/pulumi/pulumi-cloud/issues/112, but it seems that docker.Image requires a build arg, which suggests to me it's meant for local images, not remote ones.
How can I pull an image from a private registry?
EDIT:
Turns out I have a local Dockerfile for building the SSL proxy I need. I've declared a new Image with:
const sslImage = new docker.Image("sslImage", {
    imageName: 'us.gcr.io/qwil-build/ledger-ssl-proxy',
    build: {
        context: "../../",
        dockerfile: "../../Dockerfile.proxy"
    }
});
And updated the image reference in the Deployment correctly. However, I'm still getting authentication problems.
I have a solution which uses only code, which I use to retrieve images from a private repository on Gitlab:
config.ts
import { Config } from "@pulumi/pulumi";

//
// Gitlab specific config.
//
const gitlabConfig = new Config("gitlab");
export const gitlab = {
    registry: "registry.gitlab.com",
    user: gitlabConfig.require("user"),
    email: gitlabConfig.require("email"),
    password: gitlabConfig.requireSecret("password"),
}

import * as config from "./config";
import { Base64 } from 'js-base64';
import * as kubernetes from "@pulumi/kubernetes";

[...]
const provider = new kubernetes.Provider("do-k8s", { kubeconfig })

const imagePullSecret = new kubernetes.core.v1.Secret(
    "gitlab-registry",
    {
        type: "kubernetes.io/dockerconfigjson",
        stringData: {
            ".dockerconfigjson": pulumi
                .all([config.gitlab.registry, config.gitlab.user, config.gitlab.password, config.gitlab.email])
                .apply(([server, username, password, email]) => {
                    return JSON.stringify({
                        auths: {
                            [server]: {
                                auth: Base64.encode(username + ":" + password),
                                username: username,
                                email: email,
                                password: password
                            }
                        }
                    })
                })
        }
    },
    {
        provider: provider
    }
);

// Then use the imagePullSecret in your deployment like this
const deployment = new k8s.apps.v1.Deployment(name, {
    spec: {
        selector: { matchLabels: labels },
        template: {
            metadata: { labels: labels },
            spec: {
                imagePullSecrets: [{ name: args.imagePullSecret.metadata.apply(m => m.name) }],
                containers: [container]
            },
        },
    },
});
Turns out running pulumi destroy --yes && pulumi up --skip-preview --yes is what I needed. I guess I was in some weird inconsistent state, but this is fixed now.
D'oh! Looks like RemoteImage is the answer: https://www.pulumi.com/docs/reference/pkg/nodejs/pulumi/docker/#RemoteImage
EDIT:
I tried
const sslImage = new docker.RemoteImage("sslImage", {
    name: 'us.gcr.io/qwil-build/monolith-ssl-proxy:latest',
})
And I'm still getting authentication errors, so I don't think this is the answer.
You need to give your cluster the credentials to your Docker Registry, so that it can pull the images from it.
The manual process would be:
docker login registry.gitlab.com
cat ~/.docker/config.json | base64
Then create a registry_secret.yaml with the output from above
apiVersion: v1
kind: Secret
metadata:
  name: regsec
data:
  .dockerconfigjson: ewJImF1dGhzIjogewoJCSJyZWdpc3RyeS5naXfRsYWsi7fQoJfSwKCSJIdHRwSGVhZGVycyI6IHsKCQkdiVXNlci1BZ2VudCI6ICJEb2NrZXItQ2xpZW50LzEaLjxzLxjUgKH9yIjogInN3YXJtIgp9
type: kubernetes.io/dockerconfigjson
and then apply it to your cluster with
kubectl apply -f registry_secret.yaml && kubectl get secrets
You can wrap that into Pulumi, as it supports YAML files, like this:
new k8s.yaml.ConfigGroup("docker-secret", {files: "registry_secret.yaml"});
This only works if your credentials are stored directly in .docker/config.json; it will not work if you are using a credential store.
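For reference, here is roughly what config.json looks like when a credential store is in use (an illustrative example, not from the original answer); the auths entries are empty, so there is no token to base64-encode:
{
  "auths": {
    "registry.gitlab.com": {}
  },
  "credsStore": "desktop"
}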
The alternative would be to create the secret directly by providing your user credentials and extracting the token
kubectl create secret docker-registry regsec \
  --docker-server=registry.gitlab.com --docker-username=... \
  --docker-email=... --docker-password=... \
  --dry-run -o yaml | grep .dockerconfigjson: | sed -e 's/.dockerconfigjson://' | sed -e 's/^[ \t]*//'
This token can now be stored as a pulumi secret with
pulumi config set docker_token --secret <your_token>
and be used like this
import {Secret} from "@pulumi/kubernetes/core/v1";
import {Config} from "@pulumi/pulumi";

/**
 * Creates a docker registry secret to pull images from private registries
 */
export class DockerRegistry {
    constructor(provider: any) {
        const config = new Config();
        const dockerToken = config.require("docker_token");
        new Secret("docker-registry-secret", {
            metadata: {
                name: "docker-registry-secret"
            },
            data: {
                ".dockerconfigjson": dockerToken
            },
            type: "kubernetes.io/dockerconfigjson"
        }, {provider});
    }
}
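A hypothetical usage sketch (the resource names here are assumptions, not from the original answer): instantiate DockerRegistry once for the cluster's provider, then reference the secret by its fixed name in any deployment that pulls from the private registry:
const registry = new DockerRegistry(provider);

const deployment = new k8s.apps.v1.Deployment("ledger", {
    spec: {
        template: {
            metadata: { labels: { name: "ledger" } },
            spec: {
                // Matches the metadata.name set inside DockerRegistry.
                imagePullSecrets: [{ name: "docker-registry-secret" }],
                containers: [{
                    name: "ssl-proxy",
                    image: "us.gcr.io/qwil-build/monolith-ssl-proxy:latest",
                }],
            },
        },
    },
}, { provider });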

Unable to Connect Python App to RabbitMQ in Kube pods using ClusterIP

I am having trouble linking up my Python application pod to my RabbitMQ pod. I am new to the k8s world, so any and all help is much appreciated. The steps I have taken are as follows:
- Building the Docker image tagged as adapteremulator-container:latest
- Applying the config files
- Running docker build adapteremulator-container:latest .
I am getting this connection error:
Traceback (most recent call last):
  File "Emulator.py", line 17, in <module>
    RMQ = rabbit(config["rabbitMQ"])
  File "/app/RabbitClass.py", line 20, in __init__
    self.createChannel()
  File "/app/RabbitClass.py", line 30, in createChannel
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host, port=self.port, virtual_host=self.virtualHost, credentials=self.credentials))
  File "/usr/local/lib/python3.7/site-packages/pika/adapters/blocking_connection.py", line 377, in __init__
    self._process_io_for_connection_setup()
  File "/usr/local/lib/python3.7/site-packages/pika/adapters/blocking_connection.py", line 417, in _process_io_for_connection_setup
    self._open_error_result.is_ready)
  File "/usr/local/lib/python3.7/site-packages/pika/adapters/blocking_connection.py", line 469, in _flush_output
    raise maybe_exception
  File "/usr/local/lib/python3.7/site-packages/pika/adapters/base_connection.py", line 176, in _adapter_connect
    socket.IPPROTO_TCP)
  File "/usr/local/lib/python3.7/site-packages/pika/adapters/base_connection.py", line 304, in _getaddrinfo
    return socket.getaddrinfo(host, port, family, socktype, proto)
  File "/usr/local/lib/python3.7/socket.py", line 748, in getaddrinfo
    for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
socket.gaierror: [Errno -3] Try again
Dockerfile:
FROM python:3.7-alpine
WORKDIR /app
COPY requirements.txt ./Emulator/ /app/
RUN apk add --no-cache build-base --virtual .install-deps \
&& ln -s /usr/include/locale.h /usr/include/xlocale.h \
&& pip install --no-cache-dir -r requirements.txt \
&& apk del .install-deps
EXPOSE 5000
ENTRYPOINT [ "python" ]
CMD [ "emulator.py" ]
Here are my 3 config files:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: emulator-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      component: adapteremulator
  template:
    metadata:
      labels:
        component: adapteremulator
    spec:
      containers:
        - name: adapteremulator-container
          image: adapteremulator-container:latest
          imagePullPolicy: "IfNotPresent"
# rabbitmq-management-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rabbitmq-deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      name: rabbitmq-pod
  template:
    metadata:
      labels:
        name: rabbitmq-pod
    spec:
      restartPolicy: Always
      containers:
        - name: rabbitmq-container
          image: rabbitmq:3.7.8-management
apiVersion: v1
kind: Service
metadata:
  name: rabbitmq-cluster-ip-service
spec:
  type: ClusterIP
  selector:
    name: rabbitmq-pod
  ports:
    - port: 5672
      targetPort: 5672
Here are my RabbitMQ class and job config:
# Assumed imports for this snippet (not shown in the original post):
import pika
import uuid


class rabbit():
    def __init__(self, config):
        # unpack configuration
        self.host = config["host"]
        self.port = config["port"]
        self.user = config["user"]
        self.password = config["password"]
        self.virtualHost = config["virtualHost"]
        self.credentials = pika.PlainCredentials(self.user, self.password)
        # set internal variables
        self.id = uuid.uuid4()
        self.createChannel()
        self.queues = {}
        self.messageSendHistory = []
        self.messageReceiveHistory = []
        self.activeQueue = None
        self.activeQueueName = None
        self.callbackFunction = None

    def createChannel(self):
        # establish connection with RabbitMQ server
        connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host, port=self.port, virtual_host=self.virtualHost, credentials=self.credentials))
        self.connection = connection  # keep a reference so close() below works
        channel = connection.channel()
        self.channel = channel
        print("\nRabbitMQ channel successfully created: channel == {0} >>> host == {1} >>> port == {2} >>> virtual host == {3}\n".format(channel, self.host, self.port, self.virtualHost))
        return channel

    def createQueue(self, name):
        # create new queue on channel & make active
        newQueue = self.channel.queue_declare(name)
        self.queues.update({name: newQueue})
        self.activateQueue(name)
        print("\nRabbitMQ queue successfully created: name == {0} >>> queue == {1}\n".format(name, newQueue))
        return newQueue

    def activateQueue(self, name):
        # make queue active
        self.activeQueueName = name
        self.activeQueue = self.queues[name]
        print("\nRabbitMQ queue successfully set as active: name == {0} >>> queue == {1}\n".format(self.activeQueueName, self.activeQueue))

    def produce(self, msg):
        # send message to queue
        # setup exchange
        # routing_key is queue name
        # body is actual message
        self.channel.basic_publish(exchange="", routing_key=self.activeQueueName, body=msg)
        self.messageSendHistory.append([self.activeQueueName, msg])
        print("\nmsg == {0} >>> queue: {1}\n".format(msg, self.activeQueueName))
        print("Message successfully added to send history\n")

    def callback(self, ch, method, properties, msg):
        # callback function for receiving messages
        # add consumed message to queue of alarm messages
        self.messageReceiveHistory.append([self.activeQueueName, msg])
        print("\nqueue: {0} >>> msg: {1}".format(self.activeQueueName, msg))
        print("Message successfully consumed from queue and added to receive history\n")
        if self.callbackFunction != None:
            self.callbackFunction(ch, method, properties, msg)
        print("\nWaiting to receive messages from queue: name == {0}".format(self.activeQueueName))
        return

    def consume(self, callbackFunction):
        # infinite loop to consume messages from the queue
        # assign callback function to receive messages from queue
        self.callbackFunction = callbackFunction
        self.channel.basic_consume(self.callback, queue=self.activeQueueName, no_ack=True)
        print("\nWaiting to receive messages from queue: name == {0}".format(self.activeQueueName))
        self.channel.start_consuming()

    def close(self):
        # gently close the connection
        # flushes network buffer
        self.connection.close()
        print("\nRabbitMQ connection successfully closed: {0}\n".format(self.connection))

    def purgeQueue(self):
        # purge all messages from a given queue
        self.channel.queue_purge(queue=self.activeQueueName)
        print("\nRabbitMQ queue successfully purged: name == {0}".format(self.activeQueueName))
Job Config
{
"seconds": 1000,
"rabbitMQ": {
"host": "rabbitmq-cluster-ip-service",
"port": 5672,
"user": "guest",
"password": "guest",
"virtualHost": "/"
},
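A quick way to narrow down the socket.gaierror above (an editor's debugging sketch, not part of the original post) is to test whether the service name resolves from inside the application pod:
import socket

# Raises socket.gaierror if "rabbitmq-cluster-ip-service" cannot be resolved,
# e.g. when the app pod runs outside the cluster or in another namespace.
print(socket.getaddrinfo("rabbitmq-cluster-ip-service", 5672))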
