Commit 89818275 authored by fouad5's avatar fouad5
Browse files

feature detection test ready

parent f84a5677
# USAGE
# python detect_shapes.py --image shapes_and_colors.png
# import the necessary packages
from shapedetector import ShapeDetector
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required=True,
# help="path to the input image")
#args = vars(ap.parse_args())
# load the image and resize it to a smaller factor so that
# the shapes can be approximated better
def detecting(image):
    """Detect significant shapes in a BGR image.

    The image is resized for better contour approximation, thresholded,
    and its external contours classified with ShapeDetector.  Qualifying
    contours (moment m00 > 3000) are drawn and labelled directly onto
    *image* (in-place side effect).

    Returns
    -------
    list : [shape_name, contour, area] for the LAST qualifying contour,
           or [None, None, 0] when no contour passes the size filter
           (previously this raised NameError on an empty result).
    """
    # Resize to a small fixed width so shapes can be approximated better;
    # keep the ratio to map contour coordinates back to the original image.
    resized = imutils.resize(image, width=300)
    ratio = image.shape[0] / float(resized.shape[0])

    # Grayscale -> slight blur -> binary threshold.
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 10, 200, cv2.THRESH_BINARY)[1]

    # Find external contours.  grab_contours() normalizes the return shape
    # across OpenCV 2/3/4 (the old cnts[0]/cnts[1] switch broke on OpenCV 4).
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    sd = ShapeDetector()

    # Track the last contour that passed the size filter so all three
    # returned values describe the SAME contour.
    shape = None
    last_contour = None
    last_area = 0
    for c in cnts:
        M = cv2.moments(c)
        if M["m00"] > 3000:  # ignore small blobs / noise
            # Contour centroid, scaled back to original image coordinates.
            cX = int((M["m10"] / M["m00"]) * ratio)
            cY = int((M["m01"] / M["m00"]) * ratio)
            shape = sd.detect(c)

            # Scale the contour back to the original image, then annotate.
            c = c.astype("float")
            c *= ratio
            c = c.astype("int")
            cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
            cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (255, 255, 255), 2)

            last_contour = c
            last_area = M["m00"]
    return [shape, last_contour, last_area]
import numpy as np
import cv2
import argparse
from detect_shapes import detecting
def getSobel (channel):
......@@ -40,8 +41,80 @@ def getColor(colorValues, colorNames, maxValue):
if(item == maxValue):
return colorNames[i]
def segment (path):
img = cv2.imread(path)
def getCroppedImage(image, roi=(300, 550, 300, 550)):
    """Load the image at path *image* and return a cropped region of it.

    Also writes the crop to "result.jpg" as a debugging artifact (kept
    for backward compatibility with the original behavior).

    Parameters
    ----------
    image : str
        Path of the image file to read.
    roi : tuple
        (y1, y2, x1, x2) crop bounds.  NOTE: numpy indexing is
        img[y1:y2, x1:x2] and *not* img[x1:x2, y1:y2].

    Raises
    ------
    FileNotFoundError
        If the image cannot be read (cv2.imread returns None instead of
        raising, which previously caused an opaque crash downstream).
    """
    img = cv2.imread(image)
    if img is None:  # cv2.imread signals failure by returning None
        raise FileNotFoundError("could not read image: {}".format(image))
    y1, y2, x1, x2 = roi
    crop_img = img[y1:y2, x1:x2]
    cv2.imwrite("result.jpg", crop_img)
    return crop_img
def getNegative(img):
    """Return the absolute per-pixel difference between *img* and the
    matching crop of the static background frame ('background.jpg'),
    isolating the foreground object.

    Raises
    ------
    FileNotFoundError
        If 'background.jpg' cannot be read (cv2.imread returns None on
        failure rather than raising).
    """
    background = cv2.imread('background.jpg')
    if background is None:  # guard: absdiff on None crashes cryptically
        raise FileNotFoundError("could not read 'background.jpg'")
    # Same ROI as getCroppedImage so both frames align pixel-for-pixel.
    background = background[300:550, 300:550]
    return cv2.absdiff(background, img)
def getFeatures (path):
size = shape = color = ""
img = getCroppedImage(path)
img = getNegative(img)
# find object
detection = detecting(img)
shape = detection[0]
if(detection[2] > 7000):
size = "BIG"
elif(detection[2] > 5000):
size = "MEDIUM"
else:
size = "SMALL"
img = getCroppedImage(path)
# img = cv2.imread(path)
# getObjectFromImage('background.jpg', path)
blurred = cv2.GaussianBlur(img, (5, 5), 0) # Remove noise
......@@ -61,11 +134,12 @@ def segment (path):
# Find contours
significant = findSignificantContours(img, sobel_8u)
# print("significant: ", significant)
# print("cnts: ", [shape[1]])
# Mask
mask = sobel.copy()
mask[mask > 0] = 0
cv2.fillPoly(mask, significant, 255)
cv2.fillPoly(mask, [detection[1]], 255)
# Invert mask
mask = np.logical_not(mask)
......@@ -74,11 +148,11 @@ def segment (path):
fname = path.split('/')[-1]
cv2.imwrite('output/' + fname, img);
print (path)
# cv2.imshow("Image", img)
cv2.imwrite('camera_image_no_background.jpeg', img)
# cv2.waitKey(0)
#cv2.imshow("Image", img)
# cv2.waitKey(0)
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
......@@ -87,41 +161,50 @@ def segment (path):
# load the image
image = img # cv2.imread(args["image"])
# define the list of boundaries
# BGR
boundaries = [
([17, 15, 100], [255, 56, 200]), #red
([86, 31, 4], [220, 88, 50]), #blue
([25, 146, 190], [62, 174, 250]), #yellowish
([0, 0, 140], [140, 140, 255]), #red
([140, 0, 0], [255, 140, 140]), #blue
([0, 140, 0], [140, 255, 140]), #green
([103, 86, 65], [145, 133, 128]) #grey
]
colorValues = []
# loop over the boundaries
for i, (lower, upper) in enumerate(boundaries):
for (lower, upper) in boundaries:
# create NumPy arrays from the boundaries
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(image, lower, upper)
# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(image, lower, upper)
#print(cl.label(image, mask))
# save the values
colorValues.append(np.count_nonzero(mask))
#print(np.count_nonzero(mask))
output = cv2.bitwise_and(image, image, mask = mask)
output = cv2.bitwise_and(image, image, mask = mask)
# show the images
#cv2.imshow("images", np.hstack([image, output]))
#cv2.waitKey(0)
color = getColor(colorValues, ['RED', 'BLUE', 'GREEN', 'GREY'], max(colorValues))
return {'shape': shape, 'color': color, 'size': size}
print(getFeatures('cube.jpg'))
# show the images
# cv2.imshow("images", np.hstack([image, output]))
cv2.imwrite('camera_image_color_{}.jpeg'.format(i), np.hstack([image, output]))
# cv2.waitKey(0)
print(getColor(colorValues, ['RED', 'BLUE', 'YELLOW(ish)', 'GREY'], max(colorValues)))
segment('camera_image.jpeg')
# import the necessary packages
import cv2
class ShapeDetector:
    """Classify a contour as triangle / square / rectangle / pentagon /
    circle from the vertex count of its polygonal approximation."""

    # Vertex counts with a direct name; 4 is special-cased, everything
    # else is assumed to be a circle.
    _BY_VERTICES = {3: "triangle", 5: "pentagon"}

    def __init__(self):
        pass

    def detect(self, c):
        """Return the shape name for contour *c*."""
        # Approximate the contour with a coarse polygon (4% of perimeter).
        perimeter = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.04 * perimeter, True)
        vertices = len(approx)

        if vertices == 4:
            # Quadrilateral: distinguish square from rectangle by the
            # aspect ratio of the bounding box (~1.0 means square).
            (_, _, w, h) = cv2.boundingRect(approx)
            aspect = w / float(h)
            return "square" if 0.95 <= aspect <= 1.05 else "rectangle"

        # 3 -> triangle, 5 -> pentagon; any other count is taken as circle.
        return self._BY_VERTICES.get(vertices, "circle")
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment