Commit 0b13d313 authored by fouad5

feature detection using linear classification

parent 79d580bb
Showing 75 additions and 0 deletions
src/features_detection/background.jpg (149 KiB)
import numpy as np
import cv2
import argparse
from detect_shapes import detecting

def getSobel(channel):
@@ -36,96 +34,14 @@ def findSignificantContours(img, sobel_8u):
    significant.sort(key=lambda x: x[1])
    return [x[0] for x in significant]

def getColor(colorValues, colorNames, maxValue):
    # return the colour name whose pixel count equals the maximum
    for i, item in enumerate(colorValues):
        if item == maxValue:
            return colorNames[i]

def getCroppedImage(image):
    # im1 = cv2.imread(image)
    # image1 = imread("/path/to/image1")
    # image2 = imread("/path/to/image2")
    # image3 = im1 - im
    img = image  # cv2.imread(image)
    # Crop a fixed region of interest.
    # NOTE: it's img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
    crop_img = img[300:550, 300:550]
    # cv2.imshow("cropped", crop_img)
    cv2.imwrite("result.jpg", crop_img)
    # cv2.waitKey(0)
    return crop_img
    # Earlier experiments, kept for reference:
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # fgbg = cv2.createBackgroundSubtractorGMG()
    # fgmask = fgbg.apply(im1)
    # fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    # cv2.imshow('frame', fgmask)
    # imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # ret, thresh = cv2.threshold(imgray, 127, 255, 0)
    # contours, hierarchy, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # ctr = np.array(contours).reshape((-1, 1, 2)).astype(np.int32)
    # cv2.drawContours(im1, [ctr], 0, (0, 255, 0), -1)
    # cv2.imwrite("result.jpg", im1)

def getNegative(img):
    # subtract the stored background shot so only the foreground object remains
    background = cv2.imread('background.jpg')
    background = background[300:550, 300:550]
    img = cv2.absdiff(background, img)
    return img

def getFeatures(image):
    size = shape = color = ""
    copy = image
    img = getCroppedImage(image)
    img = getNegative(img)
    # find the object; detecting() (from detect_shapes) returns the shape label,
    # the contour points and the contour area, which are used below
    detection = detecting(img)
    shape = detection[0]
    if detection[2] > 7000:
        size = "BIG"
    elif detection[2] > 5000:
        size = "MEDIUM"
    else:
        size = "SMALL"
    img = getCroppedImage(copy)
    # img = cv2.imread(path)
    # getObjectFromImage('background.jpg', path)
    blurred = cv2.GaussianBlur(img, (5, 5), 0)  # Remove noise
    # Edge operator
    sobel = np.max(np.array([getSobel(blurred[:, :, 0]),
                             getSobel(blurred[:, :, 1]),
                             getSobel(blurred[:, :, 2])]), axis=0)
    # Noise reduction trick, from http://sourceforge.net/p/octave/image/ci/default/tree/inst/edge.m#l182
    mean = np.median(sobel)
    # Zero any values less than mean. This reduces a lot of noise.
@@ -138,77 +54,22 @@ def getFeatures(image):
    # Find contours
    significant = findSignificantContours(img, sobel_8u)
    # print("significant: ", significant)
    # print("cnts: ", [shape[1]])
    # Mask: fill the detected contour and the significant contours, keep those pixels
    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, [detection[1]], 255)
    cv2.fillPoly(mask, significant, 255)
    # Invert mask
    mask = np.logical_not(mask)
    # Finally remove the background
    img[mask] = 0
    # fname = path.split('/')[-1]
    # cv2.imwrite('output/' + fname, img)
    # cv2.imshow("Image", img)
    # cv2.waitKey(0)
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", help="path to the image")
    args = vars(ap.parse_args())
    # load the image: the segmented crop from above is classified, not args["image"]
    image = img  # cv2.imread(args["image"])
    # define the list of colour boundaries (BGR)
    boundaries = [
        ([0, 0, 140], [140, 140, 255]),    # red
        ([140, 0, 0], [255, 140, 140]),    # blue
        ([0, 140, 0], [140, 255, 140]),    # green
        ([103, 86, 65], [145, 133, 128]),  # grey
    ]
    colorValues = []
    # loop over the boundaries
    for (lower, upper) in boundaries:
        # create NumPy arrays from the boundaries
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")
        # find the colours within the specified boundaries and apply the mask
        mask = cv2.inRange(image, lower, upper)
        # print(cl.label(image, mask))
        # count how many pixels fall inside this colour range
        colorValues.append(np.count_nonzero(mask))
        # print(np.count_nonzero(mask))
        output = cv2.bitwise_and(image, image, mask=mask)
        # show the images
        # cv2.imshow("images", np.hstack([image, output]))
        # cv2.waitKey(0)
    color = getColor(colorValues, ['RED', 'BLUE', 'GREEN', 'GREY'], max(colorValues))
    return {'shape': shape, 'color': color, 'size': size}

image = cv2.imread('sphere3.jpg')
print(getFeatures(image))
# cv2.imshow('output', img)
# cv2.key
# cv2.imwrite('output/' + fname, img)
# print(path)
# segment('original-small.jpg')
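
The commit title mentions linear classification, but the detecting() helper imported from detect_shapes is not part of this diff, so the actual shape classifier is not visible here. The sketch below only illustrates how a linear classifier could sit on top of simple contour features like the ones this script computes; every name in it (extract_features, train_linear, classify, SHAPE_LABELS) is a hypothetical assumption, not the project's implementation.

# Hypothetical sketch only -- detect_shapes.detecting() is not shown in this commit.
import numpy as np
import cv2

SHAPE_LABELS = ['CIRCLE', 'CUBE', 'TRIANGLE']  # assumed label set

def extract_features(img):
    # crude geometric features from the largest contour: area, perimeter, circularity
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    c = max(contours, key=cv2.contourArea)
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, True)
    circularity = 4 * np.pi * area / (perimeter ** 2 + 1e-6)
    return np.array([area / 1e4, perimeter / 1e2, circularity, 1.0])  # scaled features + bias term

def train_linear(X, y, n_classes):
    # one-vs-all least-squares fit: W maps a feature vector to per-class scores
    Y = np.eye(n_classes)[y]               # one-hot targets, shape (n_samples, n_classes)
    W, *_ = np.linalg.lstsq(X, Y, rcond=None)
    return W                               # shape (n_features, n_classes)

def classify(img, W):
    scores = extract_features(img) @ W
    return SHAPE_LABELS[int(np.argmax(scores))]

Training data for such a classifier would presumably come from images like the ones under src/features_detection/dataset/ (bluecircle.*.jpg, greencube.*.jpg, etc.), with X stacking extract_features() outputs row by row and y holding the integer class index of each image.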
src/features_detection/cube.jpg (254 KiB)
src/features_detection/cube2.jpg (253 KiB)
src/features_detection/cube3.jpg (149 KiB)
src/features_detection/dataset/bluecircle.1.jpg (67.6 KiB)
src/features_detection/dataset/bluecircle.2.jpg (67.6 KiB)
src/features_detection/dataset/bluecircle.3.jpg (68.4 KiB)
src/features_detection/dataset/bluecube.1.jpg (69.2 KiB)
src/features_detection/dataset/bluecube.2.jpg (69.6 KiB)
src/features_detection/dataset/bluecube.3.jpg (67.6 KiB)
src/features_detection/dataset/bluetriangle.1.jpg (67.9 KiB)
src/features_detection/dataset/bluetriangle.2.jpg (67.4 KiB)
src/features_detection/dataset/bluetriangle.3.jpg (67.1 KiB)
src/features_detection/dataset/greencircle.1.jpg (69.4 KiB)
src/features_detection/dataset/greencircle.2.jpg (69.6 KiB)
src/features_detection/dataset/greencube.1.jpg (68.3 KiB)
src/features_detection/dataset/greencube.2.jpg (69.2 KiB)
src/features_detection/dataset/greencube.3.jpg (69.3 KiB)