Want to improve LÖVE2D (Lua) level-generation code for performance

I am making a platformer where I am generating a level with the code below:
function LevelMaker.generateLevel3(width, height)
    local tiles = {}
    local objects = {}
    local entities = {}

    for y = 1, height do
        table.insert(tiles, {})

        for x = 1, width do
            if y == 9 and (x > 5 and x < 12) then
                table.insert(tiles[y], Tile(x, y, math.random(1, 3)))
            elseif (y == 12) and (x > 11 and x < 30) then
                table.insert(tiles[y], Tile(x, y, math.random(1, 3)))
            elseif (y == 3) and (x == 13) then
                table.insert(tiles[y], Tile(x, y, math.random(1, 3)))
            elseif (y == 17) and (x > 15 and x < 28) then
                table.insert(tiles[y], Tile(x, y, math.random(1, 3)))
            elseif (y == 9) and (x > 31 and x < 35) then
                table.insert(tiles[y], Tile(x, y, math.random(1, 3)))
            elseif (y == 6) and (x > 25 and x < 29) then
                table.insert(tiles[y], Tile(x, y, math.random(1, 3)))
            elseif (y > 1 and y < 7) and (x == 26) then
                table.insert(tiles[y], Tile(x, y, math.random(1, 3)))
            elseif (y == 0) and (x > 3 and x < 10) then
                table.insert(tiles[y], Tile(x, y, math.random(1, 3)))
            elseif (y == 13) and (x > 22 and x < 30) then
                table.insert(tiles[y], Tile(x, y, SKY))
                table.insert(objects,
                    GameObject{
                        texture = 'spikes',
                        x = (x - 1) * TILE_SIZE, y = (y - 1) * TILE_SIZE,
                        width = TILE_SIZE, height = TILE_SIZE,
                        frame = 2,
                        collidable = true,
                        solid = true,
                        deadly = true
                    }
                )
            elseif (y == 3) and (x == 12) then
                table.insert(tiles[y], Tile(x, y, SKY))
                table.insert(objects,
                    GameObject{
                        texture = 'spikes',
                        x = (x - 1) * TILE_SIZE, y = (y - 1) * TILE_SIZE,
                        width = TILE_SIZE, height = TILE_SIZE,
                        frame = 4,
                        collidable = true,
                        solid = true,
                        deadly = true
                    }
                )
            elseif (y == 2 or y == 4) and (x == 13) then
                table.insert(tiles[y], Tile(x, y, SKY))
                table.insert(objects,
                    GameObject{
                        texture = 'spikes',
                        x = (x - 1) * TILE_SIZE, y = (y - 1) * TILE_SIZE,
                        width = TILE_SIZE, height = TILE_SIZE,
                        frame = y == 2 and 1 or 2,
                        collidable = true,
                        solid = true,
                        deadly = true
                    }
                )
            elseif (y == 8) and (x == 10 or x == 11) then
                table.insert(tiles[y], Tile(x, y, SKY))
                table.insert(objects,
                    GameObject{
                        texture = 'spikes',
                        x = (x - 1) * TILE_SIZE, y = (y - 1) * TILE_SIZE,
                        width = TILE_SIZE, height = TILE_SIZE,
                        frame = 1,
                        collidable = true,
                        solid = true,
                        deadly = true
                    }
                )
            elseif (y > 1 and y < 7) and (x == 25) then
                table.insert(tiles[y], Tile(x, y, SKY))
                table.insert(objects,
                    GameObject{
                        texture = 'spikes',
                        x = (x - 1) * TILE_SIZE, y = (y - 1) * TILE_SIZE,
                        width = TILE_SIZE, height = TILE_SIZE,
                        frame = 4,
                        collidable = true,
                        solid = true,
                        deadly = true
                    }
                )
            elseif (y == 8) and (x == 34) then
                table.insert(tiles[y], Tile(x, y, SKY))
                table.insert(objects,
                    GameObject{
                        texture = 'spikes',
                        x = (x - 1) * TILE_SIZE, y = (y - 1) * TILE_SIZE,
                        width = TILE_SIZE, height = TILE_SIZE,
                        frame = 1,
                        collidable = true,
                        solid = true,
                        deadly = true
                    }
                )
            elseif (y == 1) and (x > 3 and x < 10) then
                table.insert(tiles[y], Tile(x, y, SKY))
                table.insert(objects,
                    GameObject{
                        texture = 'spikes',
                        x = (x - 1) * TILE_SIZE, y = (y - 1) * TILE_SIZE,
                        width = TILE_SIZE, height = TILE_SIZE,
                        frame = 2,
                        collidable = true,
                        solid = true,
                        deadly = true
                    }
                )
            else
                table.insert(tiles[y], Tile(x, y, SKY))
            end
        end
    end

    table.insert(objects,
        GameObject{
            texture = 'house',
            x = (16 - 1) * TILE_SIZE, y = (13 - 1) * TILE_SIZE,
            width = 36, height = 44,
            frame = 1,
        }
    )
    table.insert(objects,
        GameObject{
            texture = 'gems',
            x = (28 - 1) * TILE_SIZE, y = (5 - 1) * TILE_SIZE,
            width = 15, height = 11,
            frame = {1,2,3,4,5},
        }
    )

    local map = TileMap(width, height)
    map.tiles = tiles

    return GameLevel(entities, objects, map)
end
Is there a way to improve the above code (improve its time complexity)?
When the player dies over and over within 1-2 seconds, the game freezes.
I want to load the level as fast as possible.

What sticks out to me is that you're running two nested for loops over x and y just to draw a couple of lines, using range checks to decide whether each point lies on a line before placing a tile; the only thing that seems to be randomized is the tile type, not the tile position. You should store a list of lines and draw each line on the screen with randomized tiles:
for y = 1, height do -- prepare the grid
    table.insert(tiles, {})
end

local lines = {{from = 6, to = 11, y = 9}, {from = ..., to = ...}, {...}, ...}

for _, line in pairs(lines) do -- draw lines
    if line.y then
        for x = line.from, line.to do
            table.insert(tiles[line.y], Tile(x, line.y, math.random(1, 3)))
        end
    elseif line.x then
        for y = line.from, line.to do
            table.insert(tiles[y], Tile(line.x, y, math.random(1, 3)))
        end
    end
end
The fixed sky/spikes/house/gems tiles can benefit from the same technique; you'll just have to store a tile type with each line and use it instead of the random one. These also seem to be static, so why not reuse the old level and replace only the randomized tiles with the described "line drawing" technique, instead of rebuilding everything each time the player dies?
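Here is a minimal sketch of both ideas. The segment tables and field names are only illustrative (they are not from your codebase), and it assumes TileMap reads tiles[y][x], so every row is pre-filled with SKY and the line tiles are assigned by index instead of appended:

-- Sketch only: segment tables and field names are illustrative, not from the original code.
local groundSegments = {
    {y = 9,  from = 6,  to = 11},   -- horizontal ground strips
    {y = 12, from = 12, to = 29},
    {x = 26, from = 2,  to = 6},    -- a vertical strip
    -- ...the remaining ground strips...
}
local spikeSegments = {
    {y = 13, from = 23, to = 29, frame = 2},
    {y = 8,  from = 10, to = 11, frame = 1},
    -- ...the remaining spike strips...
}

function LevelMaker.generateLevel3(width, height)
    local tiles, objects, entities = {}, {}, {}

    -- Pre-fill the whole grid with SKY so tiles[y][x] is always valid.
    for y = 1, height do
        tiles[y] = {}
        for x = 1, width do
            tiles[y][x] = Tile(x, y, SKY)
        end
    end

    -- Overwrite only the cells that belong to a ground segment.
    for _, seg in ipairs(groundSegments) do
        if seg.y then
            for x = seg.from, seg.to do
                tiles[seg.y][x] = Tile(x, seg.y, math.random(1, 3))
            end
        else
            for y = seg.from, seg.to do
                tiles[y][seg.x] = Tile(seg.x, y, math.random(1, 3))
            end
        end
    end

    -- Spike cells keep their SKY tile; only the GameObject is added per cell.
    for _, seg in ipairs(spikeSegments) do
        for x = seg.from, seg.to do
            table.insert(objects, GameObject{
                texture = 'spikes',
                x = (x - 1) * TILE_SIZE, y = (seg.y - 1) * TILE_SIZE,
                width = TILE_SIZE, height = TILE_SIZE,
                frame = seg.frame,
                collidable = true, solid = true, deadly = true
            })
        end
    end

    -- ...house and gems objects as before...

    local map = TileMap(width, height)
    map.tiles = tiles
    return GameLevel(entities, objects, map)
end

-- Build the level once and hand back the cached copy on later deaths.
local cachedLevel3
function LevelMaker.getLevel3(width, height)
    if not cachedLevel3 then
        cachedLevel3 = LevelMaker.generateLevel3(width, height)
    end
    return cachedLevel3
end

The pre-fill still touches every cell, but it is a single cheap constructor call instead of a long elseif chain per tile; the bigger win for the death-loop freeze is handing back the cached level instead of regenerating it every time (re-randomizing only the ground tiles on the cached copy if you want some variation).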

Related

I can't grab frame 2 / object has no attribute

I want to make separate tracking for each camera, where the total inside is the sum of each track, but it just keeps giving me a capture warning:
CvCapture_MSMF::grabFrame videoio(MSMF): can't grab frame. Error: -2147023901
followed by an "object has no attribute" error raised from this call:
frame2 = imutils.resize(frame2, width = 500)
File "D:\pyli\lib\site-packages\imutils\convenience.py", line 69, in resize
    (h, w) = image.shape[:2]
from mylib.centroidtracker import CentroidTracker
from mylib.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
from mylib.mailer import Mailer
from mylib import config, thread
import time, schedule, csv
import numpy as np
import argparse, imutils
import time, dlib, cv2, datetime
from itertools import zip_longest
t0 = time.time()
def run():
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=False,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.4,
help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
help="# of skip frames between detections")
args = vars(ap.parse_args())
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
net2 = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
if not args.get("input", False):
print("[INFO] Starting live cam 1 & 2..")
vs = VideoStream(config.url).start()
vs2 = VideoStream(config.url1).start()
time.sleep(2.0)
writer = None
W = None
H = None
ct = CentroidTracker(maxDisappeared=10, maxDistance=100)
trackers = []
trackableObjects = {}
ct2 = CentroidTracker(maxDisappeared=10, maxDistance=100)
trackers2 = []
trackableObjects2 = {}
totalFrames = 0
totalDown = 0
totalUp = 0
x = []
empty=[]
empty1=[]
totalFrames2 = 0
totalDown2 = 0
totalUp2 = 0
x2 = []
empty2=[]
empty3=[]
fps = FPS().start()
if config.Thread:
vs = thread.ThreadingClass(config.url)
vs2 = thread.ThreadingClass(config.url1)
while True:
frame = vs.read()
frame = frame[1] if args.get("input", False) else frame
frame2 = vs2.read()
frame2 = frame2[1] if args.get("input", False) else frame2
frame = imutils.resize(frame, width = 500)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame2 = imutils.resize(frame2, width = 500)
rgb2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
if W is None or H is None:
(H, W) = frame.shape[:2]
(H, W) = frame2.shape[:2]
status = "Waiting"
rects = []
status2 = "Waiting"
rects2 = []
if totalFrames % args["skip_frames"] == 0:
status = "Detecting"
trackers = []
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
net.setInput(blob)
detections = net.forward()
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > args["confidence"]:
idx = int(detections[0, 0, i, 1])
if CLASSES[idx] != "person":
continue
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(startX, startY, endX, endY)
tracker.start_track(rgb, rect)
trackers.append(tracker)
else:
for tracker in trackers:
status = "Tracking"
tracker.update(rgb)
pos = tracker.get_position()
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
rects.append((startX, startY, endX, endY))
if totalFrames2 % args["skip_frames"] == 0:
status2 = "Detecting"
trackers2 = []
blob2 = cv2.dnn.blobFromImage(frame2, 0.007843, (W, H), 127.5)
net2.setInput(blob2)
detections2 = net2.forward()
for i in np.arange(0, detections2.shape[2]):
confidence2 = detections2[0, 0, i, 2]
if confidence2 > args["confidence"]:
idx2 = int(detections2[0, 0, i, 1])
if CLASSES[idx2] != "person":
continue
box2 = detections2[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX2, startY2, endX2, endY2) = box2.astype("int")
tracker2 = dlib.correlation_tracker()
rect2 = dlib.rectangle(startX2, startY2, endX2, endY2)
tracker2.start_track(rgb2, rect2)
trackers2.append(tracker2)
else:
for tracker2 in trackers2:
status2 = "Tracking"
tracker2.update(rgb2)
pos2 = tracker2.get_position()
startX2 = int(pos2.left())
startY2 = int(pos2.top())
endX2 = int(pos2.right())
endY2 = int(pos2.bottom())
rects2.append((startX2, startY2, endX2, endY2))
cv2.line(frame, (0, H // 2), (W, H // 2), (0, 0, 0), 3)
cv2.putText(frame, "-Prediction border - Entrance-", (10, H - ((i * 20) + 200)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.line(frame2, (0, H // 2), (W, H // 2), (0, 0, 0), 3)
cv2.putText(frame2, "-Prediction border - Entrance-", (10, H - ((i * 20) + 200)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
objects = ct.update(rects)
objects2 = ct2.update(rects2)
for (objectID, centroid) in objects.items():
to = trackableObjects.get(objectID, None)
if to is None:
to = TrackableObject(objectID, centroid)
else:
y = [c[1] for c in to.centroids]
direction = centroid[1] - np.mean(y)
to.centroids.append(centroid)
if not to.counted:
if direction < 0 and centroid[1] < H // 2:
totalUp += 1
empty.append(totalUp)
to.counted = True
elif direction > 0 and centroid[1] > H // 2:
totalDown += 1
empty1.append(totalDown)
if sum(x) >= config.Threshold:
cv2.putText(frame, "-ALERT: People limit exceeded-", (10, frame.shape[0] - 80),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 2)
if config.ALERT:
print("[INFO] Sending email alert..")
Mailer().send(config.MAIL)
print("[INFO] Alert sent")
to.counted = True
x = []
x.append(len(empty1)-len(empty))
trackableObjects[objectID] = to
text = "ID {}".format(objectID)
cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.circle(frame, (centroid[0], centroid[1]), 4, (255, 255, 255), -1)
for (objectID2, centroid2) in objects2.items():
to2 = trackableObjects2.get(objectID2, None)
if to2 is None:
to2 = TrackableObject(objectID2, centroid2)
else:
y2 = [c[1] for c in to2.centroids]
direction2 = centroid2[1] - np.mean(y2)
to2.centroids.append(centroid2)
if not to2.counted2:
if direction2 < 0 and centroid2[1] < H // 2:
totalUp2 += 1
empty2.append(totalUp2)
to2.counted2 = True
elif direction2 > 0 and centroid2[1] > H // 2:
totalDown2 += 1
empty3.append(totalDown2)
if sum(x) >= config.Threshold:
cv2.putText(frame2, "-ALERT: People limit exceeded-", (10, frame2.shape[0] - 80),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 2)
if config.ALERT:
print("[INFO] Sending email alert..")
Mailer().send(config.MAIL)
print("[INFO] Alert sent")
to2.counted2 = True
x2 = []
x2.append(len(empty3)-len(empty2))
trackableObjects2[objectID2] = to2
text2 = "ID2 {}".format(objectID2)
cv2.putText(frame2, text2, (centroid2[0] - 10, centroid2[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.circle(frame2, (centroid2[0], centroid2[1]), 4, (255, 255, 255), -1)
info = [
("Exit", totalUp),
("Enter", totalDown),
("Status", status),
]
info3 = [
("Total people inside", x+x2),
]
info2 = [
("Exit", totalUp2),
("Enter", totalDown2),
("Status", status2),
]
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
for (i, (k, v)) in enumerate(info3):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (265, H - ((i * 20) + 60)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
text2 = "{}: {}".format(k, v)
cv2.putText(frame2, text2, (265, H - ((i * 20) + 60)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
for (i, (k, v)) in enumerate(info2):
text2 = "{}: {}".format(k, v)
cv2.putText(frame2, text2, (265, H - ((i * 20) + 60)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
if config.Log:
datetimee = [datetime.datetime.now()]
d = [datetimee, empty1+empty3, empty+empty2, x+x2]
export_data = zip_longest(*d, fillvalue = '')
with open('Log.csv', 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(("End Time", "In", "Out", "Total Inside"))
wr.writerows(export_data)
if writer is not None:
writer.write(frame)
writer.write(frame2)
cv2.imshow("Real-Time Monitoring/Analysis Window", frame)
cv2.imshow("Real-Time Monitoring 2", frame2)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
totalFrames += 1
totalFrames2 += 1
fps.update()
if config.Timer:
t1 = time.time()
num_seconds=(t1-t0)
if num_seconds > 28800:
break
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
if config.Thread:
vs.release()
vs2.release()
cv2.destroyAllWindows()
if config.Scheduler:
schedule.every().day.at("09:00").do(run)
while 1:
schedule.run_pending()
else:
run()

Highcharts Negative Values Column Graph Bottom Radius

I have a column chart that contains both positive and negative values, and I need to round the outer end of each column. Giving the positive columns a top border radius works, but when I try to add a border radius to the negative columns it does not work. How can I round the negative columns as well, using Highcharts in React?
Refer to the image here.
For example, in the image provided, I want the border radius at the -5% end of the negative columns, and at the top of the bar for the positive columns.
I prepared custom code that adds the wanted border radius: on the top of the column for positive values and on the bottom for negative values.
$(function() {
    'use strict';

    (function(factory) {
        if (typeof module === 'object' && module.exports) {
            module.exports = factory;
        } else {
            factory(Highcharts);
        }
    }(function(Highcharts) {
        (function(H) {
            H.wrap(H.seriesTypes.column.prototype, 'translate', function(proceed) {
                const options = this.options;
                const topMargin = options.topMargin || 0;
                const bottomMargin = options.bottomMargin || 0;

                proceed.call(this);

                H.each(this.points, function(point) {
                    console.log(point)
                    if (options.customRadius) {
                        const w = point.shapeArgs.width;
                        const h = point.shapeArgs.height;
                        const x = point.shapeArgs.x;
                        const y = point.shapeArgs.y;

                        let radiusTopLeft,
                            radiusTopRight,
                            radiusBottomRight,
                            radiusBottomLeft;

                        if (point.y > 0) {
                            radiusTopLeft = H.relativeLength(options.customRadius, w);
                            radiusTopRight = H.relativeLength(options.customRadius, w);
                            radiusBottomLeft = 0;
                            radiusBottomRight = 0;
                        } else {
                            radiusTopLeft = 0;
                            radiusTopRight = 0;
                            radiusBottomRight = H.relativeLength(options.customRadius, w);
                            radiusBottomLeft = H.relativeLength(options.customRadius, w);
                        }

                        const maxR = Math.min(w, h) / 2

                        radiusTopLeft = radiusTopLeft > maxR ? maxR : radiusTopLeft;
                        radiusTopRight = radiusTopRight > maxR ? maxR : radiusTopRight;
                        radiusBottomRight = radiusBottomRight > maxR ? maxR : radiusBottomRight;
                        radiusBottomLeft = radiusBottomLeft > maxR ? maxR : radiusBottomLeft;

                        point.dlBox = point.shapeArgs;
                        point.shapeType = 'path';
                        point.shapeArgs = {
                            d: [
                                'M', x + radiusTopLeft, y + topMargin,
                                'L', x + w - radiusTopRight, y + topMargin,
                                'C', x + w - radiusTopRight / 2, y, x + w, y + radiusTopRight / 2, x + w, y + radiusTopRight,
                                'L', x + w, y + h - radiusBottomRight,
                                'C', x + w, y + h - radiusBottomRight / 2, x + w - radiusBottomRight / 2, y + h, x + w - radiusBottomRight, y + h + bottomMargin,
                                'L', x + radiusBottomLeft, y + h + bottomMargin,
                                'C', x + radiusBottomLeft / 2, y + h, x, y + h - radiusBottomLeft / 2, x, y + h - radiusBottomLeft,
                                'L', x, y + radiusTopLeft,
                                'C', x, y + radiusTopLeft / 2, x + radiusTopLeft / 2, y, x + radiusTopLeft, y,
                                'Z'
                            ]
                        };
                    }
                });
            });
        }(Highcharts));
    }));
});
Demo: http://jsfiddle.net/BlackLabel/okn8qhdb/

How to get the function to be computed differently for different values of x?

Func support("support");
Expr left_x = clamp(x, 0, left_buffer.width() / 4);
RDom scan_left(0, left_buffer.width() / 4, 0, left_buffer.height());
scan_left.where(scan_left.x != left_x && scan_left.y != y);
support(x, y) = argmin(abs(output_x(left_x, y) - output_x(scan_left.x, scan_left.y)) + abs(output_y(left_x, y) - output_y(scan_left.x, scan_left.y)));
Expr center_x = clamp(x, left_buffer.width() / 4, left_buffer.width() * 3/4);
RDom scan_center(-left_buffer.width() / 4, left_buffer.width() / 2, 0, left_buffer.height());
scan_center.where(scan_center.x != 0 && scan_center.y != 0);
support(x, y) = argmin(abs(output_x(center_x, y) - output_x(center_x + scan_center.x, scan_center.y)) + abs(output_y(center_x, scan_center.y) - output_y(center_x + scan_center.x, scan_center.y)));
Expr right_x = clamp(x, left_buffer.width() * 3/4, left_buffer.width());
RDom scan_right(left_buffer.width() * 3/4, left_buffer.width() / 4, 0, left_buffer.height());
scan_right.where(scan_right.x != right_x && scan_right.y != y);
support(x, y) = argmin(abs(output_x(right_x, y) - output_x(scan_right.x, scan_right.y)) + abs(output_y(right_x, y) - output_y(scan_right.x, scan_right.y)));
support.trace_stores();
Realization r = support.realize(left_buffer.width(), left_buffer.height());
The function "support" should be computed differently depending on the x value. For x = [0, width/4] compute it according to the first definition, for x = [width/4, width * 3/4] compute it according to the second definition, and for x = [width*3/4, width] compute it according the third definition. I would think that putting the boundary conditions with respect to those update definitions and then realizing over the whole buffer would do the trick. Right now though, the previous definitions are being overwritten. Since this doesn't seem to work, I would think of doing three realizations but that seems inelegant since we're talking about just one image. Is it possible to achieve the result in a single realization, or must I break into three realizations?
I tried RDoms too:
Func support("support");
support(x, y) = Tuple(i32(0), i32(0), f32(0));
RDom left_x(0, left_buffer.width() / 4);
RDom scan_left(0, left_buffer.width() / 4, 0, left_buffer.height());
scan_left.where(scan_left.x != left_x && scan_left.y != y);
support(left_x, y) = argmin(scan_left, abs(output_x(left_x, y) - output_x(scan_left.x, scan_left.y)) + abs(output_y(left_x, y) - output_y(scan_left.x, scan_left.y)));
RDom center_x(left_buffer.width() / 4, left_buffer.width() / 2);
RDom scan_center(-left_buffer.width() / 4, left_buffer.width() / 2, 0, left_buffer.height());
scan_center.where(scan_center.x != 0 && scan_center.y != 0);
support(center_x, y) = argmin(scan_center, abs(output_x(center_x, y) - output_x(center_x + scan_center.x, scan_center.y))
    + abs(output_y(center_x, scan_center.y) - output_y(center_x + scan_center.x, scan_center.y)));
RDom right_x(left_buffer.width() * 3/4, left_buffer.width() / 4);
RDom scan_right(left_buffer.width() * 3/4, left_buffer.width() / 4, 0, left_buffer.height());
scan_right.where(scan_right.x != right_x && scan_right.y != y);
support(right_x, y) = argmin(scan_right, abs(output_x(right_x, y) - output_x(scan_right.x, scan_right.y)) + abs(output_y(right_x, y) - output_y(scan_right.x, scan_right.y)));
support.compute_root();
support.trace_stores();
Realization r_left = support.realize(left_buffer.width(), left_buffer.height());
but this code gives errors in the following lines:
scan_left.where(scan_left.x != left_x && scan_left.y != y);
...
scan_right.where(scan_right.x != right_x && scan_right.y != y);
A simple way to solve this problem would be to use Halide's select method (example given here). Something like this should work:
Func support("support");
Expr left_x = clamp(x, 0, left_buffer.width() / 4);
RDom scan_left(0, left_buffer.width() / 4, 0, left_buffer.height());
scan_left.where(scan_left.x != left_x && scan_left.y != y);
Expr first = argmin(abs(output_x(left_x, y) - output_x(scan_left.x, scan_left.y)) + abs(output_y(left_x, y) - output_y(scan_left.x, scan_left.y)));
Expr center_x = clamp(x, left_buffer.width() / 4, left_buffer.width() * 3/4);
RDom scan_center(-left_buffer.width() / 4, left_buffer.width() / 2, 0, left_buffer.height());
scan_center.where(scan_center.x != 0 && scan_center.y != 0);
Expr second = argmin(abs(output_x(center_x, y) - output_x(center_x + scan_center.x, scan_center.y)) + abs(output_y(center_x, scan_center.y) - output_y(center_x + scan_center.x, scan_center.y)));
Expr right_x = clamp(x, left_buffer.width() * 3/4, left_buffer.width());
RDom scan_right(left_buffer.width() * 3/4, left_buffer.width() / 4, 0, left_buffer.height());
scan_right.where(scan_right.x != right_x && scan_right.y != y);
Expr third = argmin(abs(output_x(right_x, y) - output_x(scan_right.x, scan_right.y)) + abs(output_y(right_x, y) - output_y(scan_right.x, scan_right.y)));
int width = left_buffer.width();
// select based on x value
support(x, y) = select(x < width / 4, first, x < width * 3 / 4, second, third);
support.trace_stores();
Realization r = support.realize(left_buffer.width(), left_buffer.height());

How to get a color range from a "Drag Box" in OpenCV 2.4

I'm using Python and OpenCV 2.4. I'm trying to get an HSV average from an area selected by dragging the mouse, much like in the camShift example provided by OpenCV. But I also want the X, Y of the selected color's instances in a video feed.
I've been hacking at the onmouse function from camShift. I feel it is close to what I want; I just can't seem to extract the mean HSV values of the selected area. I know I could probably get this done with a for loop, but I'm trying to make it as responsive as possible.
def onmouse(self, event, x, y, flags, param):
    x, y = np.int16([x, y]) # BUG
    if event == cv2.EVENT_LBUTTONDOWN:
        self.drag_start = (x, y)
        self.tracking_state = 0
    if self.drag_start:
        if flags & cv2.EVENT_FLAG_LBUTTON:
            h, w = 480, 640 # self.frame.shape[:2]
            xo, yo = self.drag_start
            x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
            x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
            self.selection = None
            if x1-x0 > 0 and y1-y0 > 0:
                self.selection = (x0, y0, x1, y1)
        else:
            self.drag_start = None
            if self.selection is not None:
                self.tracking_state = 1
OK, it's crude, but this seems to be a lot closer than where I was before:
import numpy as np
import cv2
import video

class App(object):
    def __init__(self, video_src):
        #self.cam = video.create_capture(video_src)
        self.cam = cv2.VideoCapture(0)
        ret, self.frame = self.cam.read()
        cv2.namedWindow('camshift')
        cv2.setMouseCallback('camshift', self.onmouse)
        self.selection = None
        self.drag_start = None
        self.tracking_state = 0
        self.show_backproj = False

    def onmouse(self, event, x, y, flags, param):
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
            self.tracking_state = 0
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                h, w = self.frame.shape[:2]
                xo, yo = self.drag_start
                x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
                x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
                self.selection = None
                if x1-x0 > 0 and y1-y0 > 0:
                    self.selection = (x0, y0, x1, y1)
            else:
                self.drag_start = None
                if self.selection is not None:
                    self.tracking_state = 1

    def show_hist(self):
        bin_count = self.hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
        for i in xrange(bin_count):
            h = int(self.hist[i])
            cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cv2.imshow('hist', img)

    def run(self):
        while True:
            ret, self.frame = self.cam.read()
            self.frame = cv2.blur(self.frame,(3,3))
            vis = self.frame.copy()
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
            mask2 = mask.copy()

            if self.selection:
                x0, y0, x1, y1 = self.selection
                self.track_window = (x0, y0, x1-x0, y1-y0)
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                #cv2.norm(hsv_roi)
                dHSV = cv2.mean(hsv_roi)
                h, s, v = int(dHSV[0]), int(dHSV[1]), int(dHSV[2])
                hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX);
                self.hist = hist.reshape(-1)
                self.show_hist()
                vis_roi = vis[y0:y1, x0:x1]
                cv2.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0

            if self.tracking_state == 1:
                self.selection = None

            cv2.imshow('camshift', vis)

            if self.tracking_state == 1:
                if h > 159:
                    h = 159
                if s > 235:
                    s = 235
                if v > 235:
                    v = 235
                if h < 20:
                    h = 20
                if s < 20:
                    s = 20
                if v < 20:
                    v = 20
                thresh = cv2.inRange(hsv,np.array(((h-20), (s-20), (v-20))), np.array(((h+20), (s+20), (v+20))))
                thresh2 = thresh.copy()

                # find contours in the threshold image
                contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)

                #best_cnt = 1
                max_area = 0
                for cnt in contours:
                    area = cv2.contourArea(cnt)
                    if area > max_area:
                        max_area = area
                        best_cnt = cnt

                # finding centroids of best_cnt and draw a circle there
                M = cv2.moments(best_cnt)
                cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
                print cx, cy
                cv2.circle(thresh2,(cx,cy),20,255,-1)
                cv2.imshow('thresh',thresh2)

            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()


if __name__ == '__main__':
    import sys
    try: video_src = sys.argv[1]
    except: video_src = 0
    print __doc__
    App(video_src).run()
I hack-sawed Rahman's example and camShift

Drawing a line between two points

Here's what I got so far. I rewrote the code to simplify things a bit; the previous code wasn't actually the real, basic algorithm and had fluff that I didn't need. I answered the question about pitch, and below you'll see some images of my test results.
local function Line (buf, x1, y1, x2, y2, color, pitch)
    -- identify the first pixel
    local n = x1 + y1 * pitch

    -- // difference between starting and ending points
    local dx = x2 - x1;
    local dy = y2 - y1;

    local m = dy / dx
    local err = m - 1

    if (dx > dy) then -- // dx is the major axis
        local j = y1
        local i = x1
        while i < x2 do
            buf.buffer[j * pitch + i] = color
            if (err >= 0) then
                i = i + 1
                err = err - 1
            end
            j = j + 1
            err = err + m
        end
    else -- // dy is the major axis
        local j = x1
        local i = y1
        while i < y2 do
            buf.buffer[i * pitch + j] = color
            if (err >= 0) then
                i = i + 1
                err = err - 1
            end
            j = j + 1
            err = err + m
        end
    end
end
-- (visdata[2][1][576], int isBeat, int *framebuffer, int *fbout, int w, int h
function LibAVSSuperScope:Render(visdata, isBeat, framebuffer, fbout, w, h)
    local size = 5

    Line (self.buffer, 0, 0, 24, 24, 0xffff00, 24)
    do return end
end
Edit: Oh I just realized something. 0,0 is in the lower left-hand corner. So the function's sort of working, but it's overlapping and slanted.
Edit 2:
Yeah, this whole thing's broken. I'm plugging numbers into Line() and getting all sorts of results. Let me show you some.
Here's Line (self.buffer, 0, 0, 23, 23, 0x00ffff, 24 * 2)
And here's Line (self.buffer, 0, 1, 23, 23, 0x00ffff, 24 * 2)
Edit: Wow, doing Line (self.buffer, 0, 24, 24, 24, 0x00ffff, 24 * 2) uses way too much CPU time.
Edit: Here's another image using this algorithm. The yellow dots are starting points.
Line (self.buffer, 0, 0, 24, 24, 0xff0000, 24)
Line (self.buffer, 0, 12, 23, 23, 0x00ff00, 24)
Line (self.buffer, 12, 0, 23, 23, 0x0000ff, 24)
Edit: And yes, that blue line wraps around.
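That slanting and wrapping looks a lot like a pitch mismatch: pitch is the number of buffer slots per row, so if the value passed to Line() doesn't match the real row stride of self.buffer, every row's pixels land shifted relative to the row above. A tiny standalone illustration (the 8x4 buffer and plot() helper here are just for this demo, not part of the AVS code):

-- Demo only: an 8-wide, 4-tall "screen" stored in a flat, 0-based Lua table.
local WIDTH, HEIGHT = 8, 4
local buf = {}
for i = 0, WIDTH * HEIGHT - 1 do buf[i] = "." end

-- Same indexing scheme as Line(): one row is `pitch` slots wide.
local function plot(x, y, pitch)
    buf[x + y * pitch] = "#"
end

-- A vertical 3-pixel column at x = 1, plotted with the correct pitch...
plot(1, 0, WIDTH); plot(1, 1, WIDTH); plot(1, 2, WIDTH)

-- ...and the same column at x = 5, plotted with pitch = WIDTH + 1:
-- every row lands one extra slot to the right, so the column shears into
-- a diagonal (and with a bigger mismatch it wraps onto later rows).
plot(5, 0, WIDTH + 1); plot(5, 1, WIDTH + 1); plot(5, 2, WIDTH + 1)

-- Print the buffer using the real row width to see where the pixels landed.
for y = 0, HEIGHT - 1 do
    local row = {}
    for x = 0, WIDTH - 1 do
        row[#row + 1] = buf[x + y * WIDTH]
    end
    print(table.concat(row))
end

So whichever buffer Render() hands you, the pitch passed to Line() has to be that buffer's real row stride, which may not be the same as the width you're drawing into if the buffer is padded.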
This one works.
Line (self.buffer, 0, 0, 23, 23, 0xff0000, 24 * 2)
Line (self.buffer, 0, 5, 23, 23, 0x00ff00, 24)
Line (self.buffer, 12, 0, 23, 23, 0x0000ff, 24)
--
local function Line (buf, x0, y0, x1, y1, color, pitch)
    local dx = x1 - x0;
    local dy = y1 - y0;

    buf.buffer[x0 + y0 * pitch] = color

    if (dx ~= 0) then
        local m = dy / dx;
        local b = y0 - m*x0;
        if x1 > x0 then
            dx = 1
        else
            dx = -1
        end
        while x0 ~= x1 do
            x0 = x0 + dx
            y0 = math.floor(m*x0 + b + 0.5);
            buf.buffer[x0 + y0 * pitch] = color
        end
    end
end
Here's the spiral.
The one below dances around like a music visualization, but we're just feeding it random data. I think the line quality could be better.
This is what I settled on. I just had to find valid information on the Bresenham algorithm. Thanks to cs-unc for the information about various line algorithms, from simple to complex.
function LibBuffer:Line4(x0, y0, x1, y1, color, pitch)
    local dx = x1 - x0;
    local dy = y1 - y0;
    local stepx, stepy

    -- Work with absolute deltas and remember which direction to step.
    if dy < 0 then
        dy = -dy
        stepy = -1
    else
        stepy = 1
    end
    if dx < 0 then
        dx = -dx
        stepx = -1
    else
        stepx = 1
    end

    -- Plot the starting pixel.
    self.buffer[x0 + y0 * pitch] = color

    if dx > dy then
        -- x is the major axis: advance x every step, and y only when the
        -- error term (fraction) becomes non-negative.
        local fraction = dy - bit.rshift(dx, 1)
        while x0 ~= x1 do
            if fraction >= 0 then
                y0 = y0 + stepy
                fraction = fraction - dx
            end
            x0 = x0 + stepx
            fraction = fraction + dy
            self.buffer[floor(y0) * pitch + floor(x0)] = color
        end
    else
        -- y is the major axis: advance y every step, x on error overflow.
        local fraction = dx - bit.rshift(dy, 1)
        while y0 ~= y1 do
            if fraction >= 0 then
                x0 = x0 + stepx
                fraction = fraction - dy
            end
            y0 = y0 + stepy
            fraction = fraction + dx
            self.buffer[floor(y0) * pitch + floor(x0)] = color
        end
    end
end
Here's what this one looks like.
