Erlang wx widget: load PNG

I tried to build a GUI that includes a PNG image.
The PNG-loading code works fine when it loads onto an empty frame, but in this code I cannot see the picture.
I tried changing the wxBoxSizer options and the {0,0} location on the panel, but I still cannot see the picture.
Any help?
First, I set up all of the frames and components:
-module(neuron_wx).
-author("adisolo").
%% API
-export([start/0, handleButtonStart/2]).
-include_lib("wx/include/wx.hrl").
-record(data, {env, file}).
%% Will get the pid of server
%% will send the information on button pressing
start() ->
    %% Frame and components build
    WX = wx:new(),
    Frame = wxFrame:new(wx:null(), 1, "Top Frame"),
    TopTxt = wxStaticText:new(Frame, ?wxID_ANY, "Analog Neuron final Project"),

    %% L components
    TextConfiguration = wxStaticText:new(Frame, ?wxID_ANY, "Program Configuration"),
    TextSetNumNeurons = wxStaticText:new(Frame, ?wxID_ANY, "Enter number of Neurons per Layer"),
    TextCtrlNeurons = wxTextCtrl:new(Frame, ?wxID_ANY, [{value, "example:4 3 6 7"}]),
    ButtonBuild = wxButton:new(Frame, ?wxID_ANY, [{label, "Build"}]), % {style, ?wxBU_LEFT}
    FilePickerInput = wxFilePickerCtrl:new(Frame, ?wxID_ANY),
    ButtonStart = wxButton:new(Frame, ?wxID_ANY, [{label, "Start"}]),

    %% Buttons
    wxButton:connect(ButtonStart, command_button_clicked,
                     [{callback, fun handleButtonStart/2},
                      {userData, #data{env = wx:get_env(), file = FilePickerInput}}]),
This is where I put in the panel and used the picture:
    %% R components
    TextNet = wxStaticText:new(Frame, ?wxID_ANY, "Net Description"),

    %% panel for the picture
    Panel = wxPanel:new(Frame),

    %% bitmap
    PictureDraw = wxImage:new("Erlang_logo.png"),
    Picture = wxBitmap:new(PictureDraw),
    wxPanel:connect(Panel, paint,
                    [{callback, fun(WxData, _) -> panelPictureUpdate(Picture, WxData) end}]),
    %% bottom components
    TextOutput = wxStaticText:new(Frame, ?wxID_ANY, "Program Output"),

    %% Font setup (setFont called through wxStaticText, matching the widgets)
    Font = wxFont:new(20, ?wxFONTFAMILY_ROMAN, ?wxFONTSTYLE_NORMAL, ?wxFONTWEIGHT_NORMAL),
    wxStaticText:setFont(TopTxt, Font),
    Font2 = wxFont:new(18, ?wxFONTFAMILY_ROMAN, ?wxFONTSTYLE_NORMAL, ?wxFONTWEIGHT_NORMAL),
    wxStaticText:setFont(TextConfiguration, Font2),
    wxStaticText:setFont(TextOutput, Font2),
    wxStaticText:setFont(TextNet, Font2),
    Font3 = wxFont:new(12, ?wxFONTFAMILY_ROMAN, ?wxFONTSTYLE_NORMAL, ?wxFONTWEIGHT_NORMAL),
    wxStaticText:setFont(TextSetNumNeurons, Font3),
    %% Sizer attachment
    MainSizer = wxBoxSizer:new(?wxVERTICAL),
    MainSizer2 = wxBoxSizer:new(?wxHORIZONTAL),
    MainSizerL = wxBoxSizer:new(?wxVERTICAL),
    MainSizerR = wxBoxSizer:new(?wxVERTICAL),
    MainSizer3 = wxBoxSizer:new(?wxVERTICAL),
    wxSizer:add(MainSizer, TopTxt, [{flag, ?wxALIGN_TOP bor ?wxALIGN_CENTER}, {border, 5}]),
    wxSizer:add(MainSizer, MainSizer2), % tried [{flag, ?wxALIGN_CENTER}] too
    wxSizer:add(MainSizer, MainSizer3),
    wxSizer:add(MainSizer2, MainSizerL, [{border, 5}]), % tried {flag, ?wxALIGN_LEFT} too
    wxSizer:add(MainSizer2, MainSizerR, [{border, 5}]), % tried {flag, ?wxALIGN_RIGHT} too
    %% Assign to L
    lists:foreach(fun(X) ->
                      wxSizer:add(MainSizerL, X, [{flag, ?wxALL bor ?wxEXPAND}, {border, 8}])
                  end,
                  [TextConfiguration, TextSetNumNeurons, TextCtrlNeurons,
                   ButtonBuild, FilePickerInput, ButtonStart]),

    %% Assign to R
    wxSizer:add(MainSizerR, TextNet, [{flag, ?wxALL bor ?wxALIGN_CENTRE}, {border, 8}]),
    wxSizer:add(MainSizerR, Panel, [{flag, ?wxEXPAND}]), % tried {proportion, 1}, {border, 8} too

    %% Assign to 3
    wxSizer:add(MainSizer3, TextOutput, [{flag, ?wxALL bor ?wxALIGN_CENTRE}, {border, 8}]),
    wxWindow:setSizer(Frame, MainSizer),

    %% Show frame
    wxFrame:show(Frame).
handleButtonStart(WxData, _) ->
    %% Get the user data
    Data = WxData#wx.userData,
    wx:set_env(Data#data.env),
    FilePicker = Data#data.file,

    %% Use the info
    Frame = wxFrame:new(wx:null(), ?wxID_ANY, "Print"),
    Text = io_lib:format("The file is: ~p~n", [wxFilePickerCtrl:getPath(FilePicker)]),
    wxStaticText:new(Frame, ?wxID_ANY, Text),
    wxFrame:show(Frame).
This is the drawing of the picture onto the panel (a callback for the panel's 'paint' event).
I assume this isn't the problem in my code, because it did work on the empty frame I tried.
%% draw the picture onto the panel
panelPictureUpdate(Picture, #wx{obj = Panel}) ->
    %% display picture
    DC = wxPaintDC:new(Panel),
    wxDC:drawBitmap(DC, Picture, {0, 0}),
    wxPaintDC:destroy(DC),
    ok.

The problem was that the picture was not scaled correctly.
I used the panel's paint handler:

wxPanel:connect(Panel, paint,
                [{callback, fun(WxData, _) -> panelPictureUpdate({Frame, PictureDraw}, WxData) end}])

to redraw the picture on each resize of the window:
panelPictureUpdate({_Frame, PictureDraw}, #wx{obj = Panel}) ->
    {Width, Height} = wxPanel:getSize(Panel),
    PictureDrawScaled = wxImage:scale(PictureDraw, Width, Height),
    %% display picture
    Picture = wxBitmap:new(PictureDrawScaled),
    DC = wxPaintDC:new(Panel),
    wxDC:drawBitmap(DC, Picture, {0, 0}),
    wxPaintDC:destroy(DC),
    ok.
This way the picture is scaled to the size of the panel.

There is too much code here to read comfortably, especially for someone who doesn't really know Erlang, but the problem is either that the picture is not being loaded correctly or that the panel is not sized correctly.
To check for the former, verify that the image/bitmap is valid (using its IsOk() method). To check for the latter, confirm that the panel is given enough space to expand into: not only in its immediate parent sizer, but also that this sizer itself is positioned and sized correctly.
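For the first check, a minimal sketch (assuming wx is already started and the PNG sits in the current working directory):

%% Sketch: confirm the image loaded before wrapping it in a bitmap.
PictureDraw = wxImage:new("Erlang_logo.png"),
case wxImage:isOk(PictureDraw) of
    true ->
        io:format("image loaded: ~px~p~n",
                  [wxImage:getWidth(PictureDraw), wxImage:getHeight(PictureDraw)]);
    false ->
        io:format("image failed to load; check the working directory~n")
end,
%% For the second check, print the panel's size inside the paint handler;
%% a {0,0} result points at the sizer setup rather than at the image.
io:format("panel size: ~p~n", [wxPanel:getSize(Panel)])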
Finally, note that you don't really need to draw the picture yourself at all; you could just use wxStaticBitmap instead.
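For example (a sketch reusing the names from the question; a wxStaticBitmap repaints itself, so the panel and its paint callback are no longer needed):

%% Sketch: let a wxStaticBitmap own the drawing.
Bitmap = wxBitmap:new(wxImage:new("Erlang_logo.png")),
StaticBitmap = wxStaticBitmap:new(Frame, ?wxID_ANY, Bitmap),
wxSizer:add(MainSizerR, StaticBitmap, [{flag, ?wxALL}, {border, 8}])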

Related

How to avoid connecting all the points of a function graph with Plotly

In a program that revolves around maths, I find myself using Plotly.NET (F#) to display user-defined functions. This works quite well, but there are cases where a function has discontinuities or is even defined piecewise over certain regions. For example, for the function f(x) defined by 0 if x <= 0 and 10 elsewhere, the expected graph (I used Wolfram Alpha here) is:
With Plotly and the code below,
let fn x = if x <= 0.0 then 0.0 else 10.0
let xs = [ -10.0 .. 0.1 .. 10.0 ]
let ys = Seq.map fn xs
Chart.Line(xs, ys, UseDefaults = false)
|> Chart.withTitle @"$f(x)$"
|> Chart.savePNG("example")
I get this graph:
As you can see, Plotly connects two points that shouldn't be connected (and I don't blame it, that's how the lib works). I wonder then how to avoid this kind of behaviour, which often happens with piecewise defined functions.
If possible, I would like a solution that is general enough to be applied to all functions / graphs, as my program does not encode functions in advance, the user enters them. The research I've done doesn't lead me anywhere, unfortunately, and the documentation doesn't show an example for what I want.
PS: also, as you may have noticed, Plotly doesn't display the LaTeX in the exported image; according to my research this is a known issue with Python, but if you know how to solve this with the .NET version of the lib, I'm also interested!
I don't think there's any way for Plotly to know that the function is discontinuous. Note that the vertical portion of your chart isn't truly vertical, because x jumps from 0.0 to 0.1.
However, you can still achieve the effect you're looking for by creating a separate chart for each piece of the function, and then combining them:
open Plotly.NET

let color = Color.fromString "Blue"

let xsA = [ -10.0 .. 0.0 ]
let ysA = xsA |> Seq.map (fun _ -> 0.0)
let chartA = Chart.Line(xsA, ysA, LineColor = color)

let xsB = [ 0.0 .. 10.0 ]
let ysB = xsB |> Seq.map (fun _ -> 10.0)
let chartB = Chart.Line(xsB, ysB, LineColor = color)

[ chartA; chartB ]
|> Chart.combine
|> Chart.withLegend false
|> Chart.show
Note that there are actually two distinct points for x = 0 in the combined chart, so it's technically not a function. (Perhaps there's some way to show that the top piece is open, while the bottom piece is closed in Plotly, but I don't know how.) Result is:
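If the functions are user-entered and you can't split them by hand, a hedged generalization of the same idea is to sample once and cut the point list into segments wherever consecutive y-values jump by more than a threshold. The splitAtJumps helper and the threshold value below are my own, not part of Plotly.NET:

open Plotly.NET

// Split sampled points into segments at jumps larger than `threshold`.
let splitAtJumps threshold xs ys =
    List.zip xs ys
    |> List.fold (fun segments (x, y) ->
        match segments with
        | ((_, py) :: _ as current) :: rest when abs (y - py) <= threshold ->
            ((x, y) :: current) :: rest      // extend the current segment
        | _ ->
            [ (x, y) ] :: segments) []       // start a new segment
    |> List.map List.rev
    |> List.rev

let fn x = if x <= 0.0 then 0.0 else 10.0
let xs = [ -10.0 .. 0.1 .. 10.0 ]
let ys = xs |> List.map fn

splitAtJumps 1.0 xs ys
|> List.map (fun segment ->
    Chart.Line(List.map fst segment, List.map snd segment,
               LineColor = Color.fromString "Blue"))
|> Chart.combine
|> Chart.withLegend false
|> Chart.show

The threshold has to be chosen relative to the sampling step and the expected slope, so this will still misclassify functions that are very steep but continuous.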

detect object in image with almost similar background

I have to detect mice in a cage; input images look like the following:
At the moment I am using cv.createBackgroundSubtractorMOG2() on the video stream to find the area containing the mice, and afterwards the Canny edge detector to extract the contours of the mice.
However, this is not working that well. The more the mice move, the better it works, but I guess there could be a better approach to detect them.
Does anyone have a different idea how to detect the mice?
Thanks in advance
After subtracting the background, you could use a threshold to remove noise. Try saving the subtracted image and seeing what it looks like. Here's a script I use to tweak filter parameters (run it with the subtracted image):
import cv2
import numpy as np

screenshot_path = 'screenshot.bmp'

def nothing(x):
    pass

# Creating windows for later use
cv2.namedWindow('mask', cv2.WINDOW_NORMAL)
cv2.namedWindow('trackbar', cv2.WINDOW_NORMAL)

# Starting with 100's to prevent error while masking
h, s, v = 100, 100, 100

# Creating track bars
cv2.createTrackbar('h', 'trackbar', 0, 180, nothing)
cv2.createTrackbar('s', 'trackbar', 0, 255, nothing)
cv2.createTrackbar('v', 'trackbar', 164, 255, nothing)
cv2.createTrackbar('h2', 'trackbar', 120, 180, nothing)
cv2.createTrackbar('s2', 'trackbar', 12, 255, nothing)
cv2.createTrackbar('v2', 'trackbar', 253, 255, nothing)

frame = cv2.imread(screenshot_path)

# converting to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

while (1):
    # get info from track bar and apply to result
    h = cv2.getTrackbarPos('h', 'trackbar')
    s = cv2.getTrackbarPos('s', 'trackbar')
    v = cv2.getTrackbarPos('v', 'trackbar')
    h2 = cv2.getTrackbarPos('h2', 'trackbar')
    s2 = cv2.getTrackbarPos('s2', 'trackbar')
    v2 = cv2.getTrackbarPos('v2', 'trackbar')

    # Normal masking algorithm
    lower = np.array([h, s, v])
    upper = np.array([h2, s2, v2])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('result', result)
    print(h, s, v, h2, s2, v2)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
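Once the trackbars give you values that isolate the mice, here is a hedged sketch of the whole pipeline (MOG2 subtraction, a hard threshold to drop shadows and speckle, then contours). The file name, threshold, and minimum-area values are assumptions, and the two-value findContours return assumes OpenCV 4:

import cv2

cap = cv2.VideoCapture("mice.mp4")  # hypothetical input video
subtractor = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    fg = subtractor.apply(frame)
    # MOG2 marks shadows as 127; a high threshold keeps only foreground
    _, mask = cv2.threshold(fg, 200, 255, cv2.THRESH_BINARY)
    # opening removes small speckle noise from the mask
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 500:  # assumed minimum mouse size in pixels
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("mice", frame)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()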
If that doesn't work, I would use an object tracker API like CSRT
# python opencv_object_tracking.py
# python opencv_object_tracking.py --video dashcam_boston.mp4 --tracker csrt

# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
                help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
                help="OpenCV object tracker type")
args = vars(ap.parse_args())

# extract the OpenCV version info
(major, minor) = cv2.__version__.split(".")[:2]

# if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
# function to create our object tracker
if int(major) == 3 and int(minor) < 3:
    tracker = cv2.Tracker_create(args["tracker"].upper())

# otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
# appropriate object tracker constructor:
else:
    # initialize a dictionary that maps strings to their corresponding
    # OpenCV object tracker implementations
    OPENCV_OBJECT_TRACKERS = {
        "csrt": cv2.TrackerCSRT_create,
        "kcf": cv2.TrackerKCF_create,
        "boosting": cv2.TrackerBoosting_create,
        "mil": cv2.TrackerMIL_create,
        "tld": cv2.TrackerTLD_create,
        "medianflow": cv2.TrackerMedianFlow_create,
        "mosse": cv2.TrackerMOSSE_create
    }

    # grab the appropriate object tracker using our dictionary of
    # OpenCV object tracker objects
    tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()

# initialize the bounding box coordinates of the object we are going
# to track
initBB = None

# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)

# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# initialize the FPS throughput estimator
fps = None

# loop over frames from the video stream
while True:
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame

    # check to see if we have reached the end of the stream
    if frame is None:
        break

    # resize the frame (so we can process it faster) and grab the
    # frame dimensions
    frame = imutils.resize(frame, width=500)
    (H, W) = frame.shape[:2]

    # check to see if we are currently tracking an object
    if initBB is not None:
        # grab the new bounding box coordinates of the object
        (success, box) = tracker.update(frame)

        # check to see if the tracking was a success
        if success:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h),
                          (0, 255, 0), 2)

        # update the FPS counter
        fps.update()
        fps.stop()

        # initialize the set of information we'll be displaying on
        # the frame
        info = [
            ("Tracker", args["tracker"]),
            ("Success", "Yes" if success else "No"),
            ("FPS", "{:.2f}".format(fps.fps())),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                               showCrosshair=True)

        # start OpenCV object tracker using the supplied bounding box
        # coordinates, then start the FPS throughput estimator as well
        tracker.init(frame, initBB)
        fps = FPS().start()

    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        break

# if we are using a webcam, release the pointer
if not args.get("video", False):
    vs.stop()

# otherwise, release the file pointer
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()

Standalone Indicator in TradingView

I have written this strategy with 3 MAs (18 EMA, 50 EMA and 200 SMA).
On my 15-minute chart (which I use for trading), if the 18 EMA is below the 50 EMA and the 50 EMA is below the 200 SMA, and the same ordering also holds on the 1-hour chart, then I plot a red diamond on top of the chart, like so:
Signal
This would be to look for a short entry; the opposite would be for a long. So far so good. What I am trying to do, instead of flipping through the charts to look for these setups, is something like this:
standalone indicator
if the 18 EMA is below the 50 EMA and the 50 EMA is below the 200 SMA, show a red diamond in an M15 box, just like in the picture above
if the same holds on the 1-hour chart, show a red diamond in a 1HR box, just like in the picture above, for the currency pair AUDUSD... and repeat the same for x amount of currencies. The bottom line is to have one place to look for the setup instead of flipping through the charts.
Here is the code that I have, which I am currently using, as shown in the very first picture above:
//@version=3
study(title="Inpulse Pullback 15", overlay=true)
s18ema = ema(close, 18) //slower 18EMA
s50ema = ema(close, 50) //medium 50EMA
s200sma = sma(close, 200) //slowest 200SMA
price = open[1]
s2 = security(tickerid, "60", s18ema) // 60 Minutes
s3 = security(tickerid, "60", s50ema) // 60 Minutes
s4 = security(tickerid, "60", s200sma) // 60 Minutes
plot(s18ema, title="EMA 18", color = aqua, linewidth = 1, transp=0)
plot(s50ema, title="EMA 50", color = orange, linewidth = 2, transp=0)
plot(s200sma, title="SMA 200", color = blue, linewidth = 2, transp=0)
longCondSig = (s18ema > s50ema) and (s50ema > s200sma) and (s2 > s3) and (s3 > s4)
shortCondSig = (s18ema < s50ema) and (s50ema < s200sma) and (s2 < s3) and (s3 < s4)
plotshape(series=longCondSig, title="Long", style=shape.diamond, location=location.top, color=#29ee0b, transp=0, size=size.small)
plotshape(series=shortCondSig, title="Short", style=shape.diamond, location=location.top, color=red, transp=0, size=size.small)
longEntry = longCondSig ==1
shortEntry = shortCondSig ==1
alertcondition(longEntry, "le", "le")
alertcondition(shortEntry, "se", "se")
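One way to get closer to this, sketched in the same Pine v3 style, is a separate study that requests each symbol/timeframe explicitly through security(), which also accepts a symbol string. This is a sketch, not a full screener: the symbol strings and pane layout are assumptions, and you would repeat the block per pair you want to watch:

//@version=3
study(title="MA Stack Screener (sketch)", overlay=false)
// AUDUSD, 15-minute stack
a15e18 = security("FX:AUDUSD", "15", ema(close, 18))
a15e50 = security("FX:AUDUSD", "15", ema(close, 50))
a15s200 = security("FX:AUDUSD", "15", sma(close, 200))
// AUDUSD, 60-minute stack
a60e18 = security("FX:AUDUSD", "60", ema(close, 18))
a60e50 = security("FX:AUDUSD", "60", ema(close, 50))
a60s200 = security("FX:AUDUSD", "60", sma(close, 200))
// short stacks (the long case mirrors these with > instead of <)
shortM15 = a15e18 < a15e50 and a15e50 < a15s200
shortH1 = a60e18 < a60e50 and a60e50 < a60s200
plotshape(shortM15, title="AUDUSD M15 short", style=shape.diamond, location=location.top, color=red, transp=0, size=size.small)
plotshape(shortH1, title="AUDUSD 1H short", style=shape.diamond, location=location.bottom, color=maroon, transp=0, size=size.small)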

Deedle missing values after grouping

I have two frames, each of which contains some IDs and zero to many measures for each ID. I want to get the average measure per ID for each frame and combine to a larger frame.
The problem is that when an ID does not appear in one of the two frames, after grouping it results in a missing value in the combined frame. Here is an example. Notice ID "Chris" does not appear in frame A.
let aF = frame [ "AID" =?> Series.ofValues [ "Andrew"; "Andrew"; "Andrew"]; "AMES" =?> Series.ofValues [ 2; 4; 3]]
let bF = frame [ "BID" =?> Series.ofValues [ "Andrew"; "Chris"; "Andrew"]; "BMES" =?> Series.ofValues [ 1; 6; 7]]
let groupF = frame [ "AG" => (aF |> Frame.groupRowsByString "AID" |> Frame.getCol "AMES") ; "BG" => (bF |> Frame.groupRowsByString "BID" |> Frame.getCol "BMES") ]
let groupFMean = groupF |> Frame.getNumericCols |> Series.mapValues (Stats.levelMean fst) |> Frame.ofColumns |> Frame.fillMissingWith 0
groupFMean.SaveCsv( "tgroupFMean.csv", includeRowKeys=true, keyNames=["Id"] )
The resulting table looks like this:
Id      AG   BG
Andrew  3    4
Chris        6
And the blank cell is "". I've tried variations with fillMissingWith 0 (at series and frame level) without success.
The answer is not very obvious: the problem is that fillMissingWith only touches columns that have the same type as the value you are using to fill the data. So, for example, fillMissingWith "Unknown" would only fill missing values in columns that are of type string.
In your case, Frame.fillMissingWith 0 is only applied to columns of type int and there are no such columns. If you use Frame.fillMissingWith 0.0, things work as expected!
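In other words, with the frame from the question (a minimal restatement of the fix; only the literal changes):

let groupFMeanOk =
    groupF
    |> Frame.getNumericCols
    |> Series.mapValues (Stats.levelMean fst)
    |> Frame.ofColumns
    |> Frame.fillMissingWith 0.0 // float literal matches the float mean columns

Chris's AG cell then comes out as 0 instead of blank.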
PS: If you have any thoughts on how this could be done better, please let us know. I'm really not sure what the right behavior is here!

How to make ImageTransformation produce an anamorphic version of image

I'm experimenting with the ImageTransformation function to try to make anamorphic versions of images, but with limited progress so far. I'm aiming for the kind of result you get with an image reflected in a cylindrical mirror, where the image curves around the central mirror for about 270 degrees. The Wikipedia article has a couple of neat examples (and I borrowed Holbein's skull from them too).
i = Import["../Desktop/Holbein_Skull.jpg"];
i = ImageResize[i, 120]
f[x_, y_] := {(2 (y - 0.3) Cos [1.5 x]), (2 (y - 0.3) Sin [1.5 x])};
ImageTransformation[i, f[#[[1]], #[[2]]] &, Padding -> White]
But I can't persuade Mathematica to show me the entire image, or to bend it correctly. The anamorphic image should wrap right around the mirror placed "inside" the centre of the image, but it won't. I found suitable values for the constants by putting everything inside a Manipulate (and turning the resolution down :). I'm using the formulas:
x1 = a(y + b) cos(kx)
y1 = a(y + b) sin(kx)
Any help producing a better result would be greatly appreciated!
In ImageTransformation[f,img], the function f is such that a point {x,y} in the resulting image corresponds to f[{x,y}] in img. Since the resulting image is basically the polar transformation of img, f should be the inverse polar transformation, so you could do something like
anamorphic[img_, angle_: 270 Degree] :=
Module[{dim = ImageDimensions[img], rInner = 1, rOuter},
rOuter = rInner (1 + angle dim[[2]]/dim[[1]]);
ImageTransformation[img,
Function[{pt}, {ArcTan[-#2, #1] & @@ pt, Norm[pt]}],
DataRange -> {{-angle/2, angle/2}, {rInner, rOuter}},
PlotRange -> {{-rOuter, rOuter}, {-rOuter, rOuter}},
Padding -> White
]
]
The resulting image looks something like
anamorphic[ExampleData[{"TestImage", "Lena"}]]
Note that you can get a similar result with ParametricPlot and TextureCoordinateFunction, e.g.
anamorphic2[img_Image, angle_: 270 Degree] :=
Module[{rInner = 1,rOuter},
rOuter = rInner (1 + angle #2/#1 & @@ ImageDimensions[img]);
ParametricPlot[{r Sin[t], -r Cos[t]}, {t, -angle/2, angle/2},
{r, rInner, rOuter},
TextureCoordinateFunction -> ({#3, #4} &),
PlotStyle -> {Opacity[1], Texture[img]},
Mesh -> None, Axes -> False,
BoundaryStyle -> None,
Frame -> False
]
]
anamorphic2[ExampleData[{"TestImage", "Lena"}]]
Edit
In answer to Mr.Wizard's question, if you don't have access to ImageTransformation or Texture you could transform the image data by hand by doing something like
anamorph3[img_, angle_: 270 Degree, imgWidth_: 512] :=
Module[{data, f, matrix, dim, rOuter, rInner = 1.},
dim = ImageDimensions[img];
rOuter = rInner (1 + angle #2/#1 & @@ dim);
data = Table[
ListInterpolation[#[[All, All, i]],
{{rOuter, rInner}, {-angle/2, angle/2}}], {i, 3}] &@ImageData[img];
f[i_, j_] := If[Abs[j] <= angle/2 && rInner <= i <= rOuter,
Through[data[i, j]], {1., 1., 1.}];
Image@Table[f[Sqrt[i^2 + j^2], ArcTan[i, -j]],
{i, -rOuter, rOuter, 2 rOuter/(imgWidth - 1)},
{j, -rOuter, rOuter, 2 rOuter/(imgWidth - 1)}]]
Note that this assumes that img has three channels. If the image has fewer or more channels, you need to adapt the code.
