I have a program that opens a turtle window and an OpenCV video window at the same time. I want to position the two windows together, or rather stitch them horizontally. How can I do this?
class PrettyWidget(QtGui.QWidget):
    def __init__(self):
        super(PrettyWidget, self).__init__()
        self.initUI()

    def initUI(self):
        self.setGeometry(500, 100, 500, 500)
        self.setWindowTitle('')
        btn = QtGui.QPushButton('Please select video', self)
        btn.resize(btn.sizeHint())
        btn.clicked.connect(self.SingleBrowse)
        btn.move(150, 200)
        self.show()

    def SingleBrowse(self):
        video_path = QtGui.QFileDialog.getOpenFileName(self, 'Single File', "./")
        video = cv2.VideoCapture(video_path)
        preprocess = preprocessing(config, "Occupancy Grid Matrix")  # this creates the turtle window holding the OGM
        while True:
            ret, frame = video.read()
            if frame is None:
                break
            cv2.imshow("", frame)
            cv2.waitKey(1)
I want the turtle window and the OpenCV window to be displayed together in one window, rather than manually arranging the two windows so they sit in the center of the screen.
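One possible approach, sketched below under the assumption that the occupancy grid can be rendered to a NumPy image (ogm_image here is hypothetical; the original code draws it with turtle instead): scale both frames to a common height, stitch them side by side with np.hstack, and show the combined image in a single OpenCV window.

import cv2
import numpy as np

def stitch_horizontally(left, right):
    # Scale the right image to the left image's height, then join them side by side.
    h = left.shape[0]
    scale = h / float(right.shape[0])
    right = cv2.resize(right, (int(right.shape[1] * scale), h))
    return np.hstack((left, right))

# ogm_image = ...  # hypothetical: the occupancy grid rendered as a BGR array
# while True:
#     ret, frame = video.read()
#     if frame is None:
#         break
#     cv2.imshow("combined", stitch_horizontally(ogm_image, frame))
#     cv2.waitKey(1)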
I want to increase/decrease the height of the image for the selected area only (the area between the white lines), as depicted in the image, and not the area outside it.
This is the same functionality performed in the app Manly - Body Muscle Editor Pro.
How can I achieve that? Any help is appreciated.
I've never written code for iOS, but I know OpenCV also works on iOS. Here I use cv2.resize.
import cv2
import numpy as np

img = cv2.imread("1.jpg")
print(img.shape)
h = img.shape[0]
w = img.shape[1]

# stretch the band between rows 120 and 240 from 120 px to 200 px tall
part_to_resize = img[120:240, :]
old_height = 120  # 240-120
new_height = 200

final_result = np.zeros((h - old_height + new_height, w, 3), dtype='uint8')
final_result[0:120, :] = img[0:120, :]                                 # top of the image, unchanged
final_result[120:120 + new_height, :] = cv2.resize(part_to_resize, (w, new_height))
final_result[120 + new_height:, :] = img[240:h, :]                     # bottom of the image, unchanged

cv2.imshow("final_result", final_result)
cv2.imshow("img", img)
cv2.waitKey()
I'm trying to use OpenCV to remove the background from my pictures.
When I run it on a single file, it works.
The code is below:
def bgremove(name, count):
    import cv2
    import numpy as np
    # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    # Load the image
    imgo = cv2.imread(name)  # the place to input the picture path
    height, width = imgo.shape[:2]
    # Create a mask holder
    mask = np.zeros(imgo.shape[:2], np.uint8)
    # Grab Cut the object
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    # Hard-coding the rect: the object must lie within this rect.
    rect = (10, 10, width - 30, height - 30)
    cv2.grabCut(imgo, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
    mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    img1 = imgo * mask[:, :, np.newaxis]
    # Get the background
    background = imgo - img1
    # Change all pixels in the background that are not black to white
    background[np.where((background > [0, 0, 0]).all(axis=2))] = [255, 255, 255]
    # Add the background and the image
    final = background + img1
    DP1 = count
    # To be done: smoothing the edges
    cv2.imwrite("A%s.JPG" % DP1, final)
However, when I call the function in a for loop to generate a group of pictures, it raises:

error: (-215:Assertion failed) totalSampleCount > 0 in function 'GMM::endLearning'
I encountered this problem, and the issue was that the rectangle rect was too small. I don't know the dimensions of your images, but try a bigger rectangle and it may solve this.
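A minimal sketch of what that change might look like, based on the bgremove function above (the margins here are illustrative, not tuned): the rectangle should cover nearly the whole frame so grabCut always finds enough probable-foreground samples to initialize its GMMs.

import cv2
import numpy as np

imgo = cv2.imread("input.JPG")  # hypothetical input path
height, width = imgo.shape[:2]
mask = np.zeros(imgo.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
# leave only a 1-pixel border instead of the original 10/30-pixel margins
rect = (1, 1, width - 2, height - 2)
cv2.grabCut(imgo, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)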
I have built a real-time face detection system, but I need to add a GUI to the program. I don't want to use the solution from here, because I don't want to read frames recursively:
def show_frame():
    _, frame = cap.read()
    ...  # skip
    lmain.after(10, show_frame)
That approach would require a lot of refactoring of my previous code, so I would prefer to read frames in a while loop. But the code below does not work. Thanks for any help.
import numpy as np
import cv2
import tkinter as tk
from PIL import Image, ImageTk

window = tk.Tk()
window.wm_title("Test")
imageFrame = tk.Frame(window, width=600, height=500)
imageFrame.grid(row=0, column=0, padx=10, pady=2)

# Capture video frames
lmain = tk.Label(imageFrame)
lmain.grid(row=0, column=0)
cap = cv2.VideoCapture(0)

def show_frame(frame):
    frame = cv2.flip(frame, 1)
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    lmain.imgtk = imgtk
    lmain.configure(image=imgtk)
    # lmain.after(10, show_frame)

while True:
    _, frame = cap.read()
    show_frame(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

window.mainloop()
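For what it's worth, a sketch of one way the while loop can be made to work (untested against the original program): window.mainloop() never gets a chance to run while the loop is spinning, so the Tk event loop has to be pumped manually with window.update() on every iteration.

while True:
    _, frame = cap.read()
    show_frame(frame)
    window.update_idletasks()
    window.update()  # let Tk process pending events so the label actually repaints
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

Note that cv2.waitKey only sees key presses when an OpenCV window has focus; with a Tk-only GUI, binding a key event on window (e.g. window.bind('q', ...)) may be a more reliable exit condition.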
I have an image that has some text in it. I want to send the image to OCR, but the image has some white noise in it, so the OCR results aren't great. I've tried to erode/dilate the image but couldn't get the threshold right. Since all the text in the images will be perfectly horizontal, I tried the Hough Transform.
Here is what the image looks like when I run the sample hough transform program bundled with OpenCV.
Question
How can I black out everything except where the red lines are?
OR How can I crop out a separate image for each of the areas highlighted by the red lines?
I would only like to concentrate on horizontal lines; the diagonal lines can be discarded.
Either option will work for me when sending to OCR, but I'd like to try both to see which gives the best results.
Here are how-tos for both, with their output.
How can I black out everything except where the red lines are?
dotess2()
['Footel text goes he: e\n', 'Some mole hele\n', 'Some Text Here\n']
OR How can I crop out a separate images for each of the areas highlighted by the red lines?
dotess1()
['Foolel text goes he: e\n', 'Some mole hele\n', 'Some Text Here\n', 'Directions\n']
code
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import math
import subprocess
import os
import operator

# some clean up/init blah blah
junk='\/,-‘’“ ”?.\';!{§_~!##$%^&*()_+-|:}»£[]¢€¥°><'
tmpdir = './tmp'
if not os.path.exists(tmpdir):
    os.makedirs(tmpdir)
for path, subdirs, files in os.walk(tmpdir):
    for name in files:
        os.remove(os.path.join(path, name))

# when the preprocessor is not perfect, there will be junk in the result.
# this is a crude means of weeding it out
def resfilter(res):
    rd = dict()
    for l in set(res):
        rd[l] = 0.
    for l in rd:
        for i in l:
            if i in junk:
                rd[l] -= 1
            elif i.isdigit():
                rd[l] += .5
            else:
                rd[l] += 1
    ret = []
    for v in sorted(rd.iteritems(), key=operator.itemgetter(1), reverse=True):
        ret.append(v[0])
    return ret

def dotess1():
    res = []
    for path, subdirs, files in os.walk(tmpdir):
        for name in files:
            fpath = os.path.join(path, name)
            img = cv2.imread(fpath)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            '''
            #if the text is too small/contains noise etc, resize and maintain aspect ratio
            if gray.shape[1]<100:
                gray=cv2.resize(gray,(int(100/gray.shape[0]*gray.shape[1]),100))
            '''
            cv2.imwrite('tmp.jpg', gray)
            args = ['tesseract.exe', 'tmp.jpg', 'tessres', '-psm', '7', '-l', 'eng']
            subprocess.call(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            with open('tessres.txt') as f:
                for line in f:
                    if line.strip() != '':
                        res.append(line)
    print resfilter(res)

def dotess2():
    res = []
    args = ['tesseract.exe', 'clean.jpg', 'tessres', '-psm', '3', '-l', 'eng']
    subprocess.call(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    with open('tessres.txt') as f:
        for line in f:
            if line.strip() != '':
                res.append(line)
    print resfilter(res)

'''
start of code
'''
img = cv2.imread('c:/data/ocr3.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(gray, 50, 200, 3)
cv2.imshow('canny', canny)

# remove the actual horizontal lines so that Hough won't detect them
linek = np.zeros((11, 11), dtype=np.uint8)
linek[5, ...] = 1
x = cv2.morphologyEx(canny, cv2.MORPH_OPEN, linek, iterations=1)
canny -= x
cv2.imshow('canny no horizontal lines', canny)

# draw a fat line so that you can box it up
lines = cv2.HoughLinesP(canny, 1, math.pi / 2, 50, 50, 50, 20)
linemask = np.zeros(gray.shape, gray.dtype)
for line in lines[0]:
    if line[1] == line[3]:  # check horizontal
        pt1 = (line[0], line[1])
        pt2 = (line[2], line[3])
        cv2.line(linemask, pt1, pt2, (255), 30)
cv2.imshow('linemask', linemask)

'''
* two methods of doing ocr: line mode and page mode
* boxmask is used so that a clean image can be saved for page mode
* for every detected box, the roi is cropped and saved so that tesseract can be run in line mode
'''
boxmask = np.zeros(gray.shape, gray.dtype)
contours, hierarchy = cv2.findContours(linemask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
idx = 0
for cnt in contours:
    idx += 1
    area = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    roi = img[y:y + h, x:x + w].copy()
    cv2.imwrite('%s/%s.jpg' % (tmpdir, str(idx)), roi)
    cv2.rectangle(boxmask, (x, y), (x + w, y + h), (255), -1)
cv2.imshow('clean', img & cv2.cvtColor(boxmask, cv2.COLOR_GRAY2BGR))
cv2.imwrite('clean.jpg', img & cv2.cvtColor(boxmask, cv2.COLOR_GRAY2BGR))
cv2.imshow('img', img)
dotess1()
dotess2()
cv2.waitKey(0)
I am trying to create a sort of image player with Python and OpenCV. The images I show have the same resolution as my screen, and I would like to display them borderless in full-screen mode (without the taskbar at the bottom and the title bar at the top).
I would also welcome advice on improving my var counter used for cycling through the images :)
Thanks
def main():
    var = 0
    while True:
        print 'loading images...'
        if var == 0:
            img = cv2.imread('2-c.jpg')
            var = var + 1
        else:
            img = cv2.imread('2-d.jpg')
        cv2.imshow("test", img)
        key = cv2.waitKey(0)
        if key == 27:
            break
EDIT:
I'm posting an image so I can explain myself better:
As you can see, there is still the blue bar on top.
Here is how I did it on my end:
cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
cv2.imshow("window", img)
Thanks to Poko, I am going to post the solution:
def main():
    var = 0
    while True:
        print('loading images...')
        if var == 0:
            img = cv2.imread('2-c.jpg')
            var = var + 1
        else:
            img = cv2.imread('2-d.jpg')
        cv2.namedWindow("test", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("test", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow("test", img)
        key = cv2.waitKey(0)
        if key == 27:
            break
You have to create a window before calling imshow.
Take a look here: http://docs.opencv.org/modules/highgui/doc/user_interface.html#namedwindow
I had a case on the Raspberry Pi where the image was not displayed full screen, but only at the top at a fixed size. Adding one more line to the code above fixed it for me.
import cv2

cap = cv2.VideoCapture(0)
width, height = cap.get(3), cap.get(4)  # frame width and height
# Screen_Width and Screen_Height must hold your display resolution
# (see the note below for one way to obtain them)
while True:
    _, frame = cap.read()
    cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.moveWindow("window", int((Screen_Width / 2) - (width / 2)), int((Screen_Height / 2) - (height / 2)))
    cv2.imshow("window", frame)
    if cv2.waitKey(1) == 27:  # Esc
        cap.release()
        cv2.destroyAllWindows()
        break
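Screen_Width and Screen_Height are not defined in the snippet above; one way to obtain them (an assumption on my part, using tkinter, whose winfo_screenwidth/winfo_screenheight report the display resolution):

import tkinter as tk

root = tk.Tk()
root.withdraw()  # no visible Tk window is needed; we only query the screen size
Screen_Width = root.winfo_screenwidth()
Screen_Height = root.winfo_screenheight()
root.destroy()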