Commit 9e00d04e authored by Mathieu Reymond

feature detection integration, demo file

parent d3c37b07
<robot name="block_blue">
  <link name="block_blue">
    <inertial>
      <origin xyz="0.025 0.025 0.025" />
      <mass value="0.5" />
      <inertia ixx="0.5" ixy="0.0" ixz="0.0" iyy="0.5" iyz="0.0" izz="0.5" />
    </inertial>
    <visual>
      <origin xyz="0.025 0.025 0.025"/>
      <geometry>
        <box size="0.05 0.05 0.05" />
      </geometry>
    </visual>
    <collision>
      <origin xyz="0.025 0.025 0.025"/>
      <geometry>
        <box size="0.05 0.05 0.05" />
      </geometry>
    </collision>
  </link>
  <gazebo reference="block_blue">
    <material>Gazebo/Blue</material>
    <mu1>1000</mu1>
    <mu2>1000</mu2>
  </gazebo>
</robot>
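For context, a minimal sketch (not part of the commit) of how a URDF like this is typically spawned through the standard gazebo_ros service that the Scene.spawn helper further down presumably wraps; the file path and model name here are assumptions:

import rospy
from gazebo_msgs.srv import SpawnModel
from geometry_msgs.msg import Pose

rospy.init_node('spawn_demo')
rospy.wait_for_service('/gazebo/spawn_urdf_model')
spawn_urdf = rospy.ServiceProxy('/gazebo/spawn_urdf_model', SpawnModel)
with open('block_blue.urdf') as f:  # assumed path to the URDF above
    # SpawnModel request fields: model_name, model_xml, robot_namespace,
    # initial_pose, reference_frame
    spawn_urdf('block_blue_0', f.read(), '', Pose(), 'world')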
-<robot name="block">
-  <link name="block">
+<robot name="block_red">
+  <link name="block_red">
     <inertial>
       <origin xyz="0.025 0.025 0.025" />
       <mass value="0.5" />
@@ -18,7 +18,7 @@
       </geometry>
     </collision>
   </link>
-  <gazebo reference="block">
+  <gazebo reference="block_red">
     <material>Gazebo/Red</material>
     <mu1>1000</mu1>
     <mu2>1000</mu2>
......
# Learning imports
import features_detection as feat
from features_detection.recognizer import getFeatures
from decision_api.decision_tree_gr import DecisionTreeGR
from planning.baxter import Baxter
from planning.scene import Scene
import cv2
# Baxter and ROS imports
import baxter_interface
import rospy
@@ -34,9 +34,8 @@ class BaxterAgent(object):
        self.tree = self.load_decision_tree()

    def extract_features(self, img):
-        return {'shape': 'SPHERE',
-                'color': 'RED',
-                'size': 'SMALL'}
+        f = getFeatures(img)
+        return f

    def _train_sort(self, arm_name, msg):
        arm = getattr(self.baxter, arm_name)
@@ -47,6 +46,7 @@ class BaxterAgent(object):
        self.baxter.sort(arm_name)
        f = self.extract_features(img)
        f['direction'] = arm_name.upper()
+        print(f)
        self.tree.add_training_sample(f)
    def _train_end(self, msg):
@@ -73,8 +73,11 @@ class BaxterAgent(object):
        self.baxter.left.move(self.baxter.left.top)
        img = self.baxter.left.take_snapshot()
        features = self.extract_features(img)
+        cv2.imwrite('tosort.jpg', img)
+        print(features)
        direction = self.tree.classify(features).lower()
-        if direction is not 'left':
+        print(direction)
+        if direction != 'left':
            self.baxter.left.move(self.baxter.left.neutral)
        self.baxter.sort(direction)
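For reference, a hedged sketch of the feature dict the decision tree consumes; the values are illustrative, and DecisionTreeGR's constructor arguments are not shown in this diff:

# Illustrative training sample; 'direction' acts as the label to predict.
tree = DecisionTreeGR()  # assumed no-argument construction
tree.add_training_sample({'shape': 'SPHERE', 'color': 'RED',
                          'size': 'SMALL', 'direction': 'LEFT'})
print(tree.classify({'shape': 'SPHERE', 'color': 'RED', 'size': 'SMALL'}))  # e.g. 'LEFT'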
......
#!/usr/bin/python
import moveit_commander
import rospy
import sys
from baxter_agent import BaxterAgent
from planning.scene import Scene

config = {
    'is_train': True,
    'tree_dir': './saved_trees/',
    'scene': 'scene description'  # should contain a description of the scene to be loaded
}

def add_item(scene, color, msg):
    if msg:
        scene.set_item(color=color)

def main():
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('garbage_bot', anonymous=True)
    b = BaxterAgent(config)
    ab = lambda m: add_item(b.scene, 'blue', m)
    ar = lambda m: add_item(b.scene, 'red', m)
    b.navs['left'].button2_changed.connect(ab)
    b.navs['right'].button2_changed.connect(ar)
    b.train()
    b.run()

if __name__ == '__main__':
    main()
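The button wiring above relies on baxter_interface's Navigator signals; a minimal standalone sketch (the node name is arbitrary and a running Baxter/ROS master is assumed):

import rospy
import baxter_interface

rospy.init_node('nav_button_demo', anonymous=True)
nav = baxter_interface.Navigator('left')
# button2_changed fires with True on press and False on release
nav.button2_changed.connect(lambda pressed: pressed and rospy.loginfo('button2 pressed'))
rospy.spin()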
import numpy as np
import cv2
import argparse

def getSobel(channel):
    # Horizontal and vertical Sobel derivatives, combined into a gradient magnitude
    sobelx = cv2.Sobel(channel, cv2.CV_16S, 1, 0, borderType=cv2.BORDER_REPLICATE)
    sobely = cv2.Sobel(channel, cv2.CV_16S, 0, 1, borderType=cv2.BORDER_REPLICATE)
    sobel = np.hypot(sobelx, sobely)
    return sobel
def findSignificantContours(img, sobel_8u):
    # OpenCV 3 returns (image, contours, hierarchy)
    image, contours, hierarchy = cv2.findContours(sobel_8u, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Find level-1 contours
    level1 = []
    for i, tupl in enumerate(hierarchy[0]):
        # Each hierarchy row is (next, previous, first child, parent);
        # keep the ones without a parent
        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    # From among them, keep the contours with a large surface area.
    significant = []
    tooSmall = sobel_8u.size * 5 / 100  # a contour covering less than 5% of the image is probably noise
    for tupl in level1:
        contour = contours[tupl[0]]
        area = cv2.contourArea(contour)
        if area > tooSmall:
            cv2.drawContours(img, [contour], 0, (0, 255, 0), 2, cv2.LINE_AA, maxLevel=1)
            significant.append([contour, area])
    significant.sort(key=lambda x: x[1])
    return [x[0] for x in significant]
def getColor(colorValues, colorNames, maxValue):
    # Return the name of the colour with the maximal pixel count
    for i, item in enumerate(colorValues):
        if item == maxValue:
            return colorNames[i]
def segment(path):
    img = cv2.imread(path)
    blurred = cv2.GaussianBlur(img, (5, 5), 0)  # remove noise
    # Edge operator: per-channel Sobel, then the max response across channels
    sobel = np.max(np.array([getSobel(blurred[:, :, 0]),
                             getSobel(blurred[:, :, 1]),
                             getSobel(blurred[:, :, 2])]), axis=0)
    median = np.median(sobel)
    # Zero any values below the median. This reduces a lot of noise.
    sobel[sobel <= median] = 0
    sobel[sobel > 255] = 255
    cv2.imwrite('output/edge.png', sobel)
    sobel_8u = np.asarray(sobel, np.uint8)
    # Find contours
    significant = findSignificantContours(img, sobel_8u)
    # Build a mask: zero everywhere, 255 inside the significant contours
    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, significant, 255)
    # Invert the mask so that True marks the background
    mask = np.logical_not(mask)
    # Finally remove the background
    img[mask] = 0
    fname = path.split('/')[-1]
    cv2.imwrite('output/' + fname, img)
    print(path)
    # cv2.imshow("Image", img)
    cv2.imwrite('camera_image_no_background.jpeg', img)
    # cv2.waitKey(0)
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", help="path to the image")
    args = vars(ap.parse_args())
    # reuse the background-free image computed above instead of reloading from disk
    image = img  # cv2.imread(args["image"])
    # define the list of colour boundaries
    boundaries = [
        ([17, 15, 100], [255, 56, 200]),    # red
        ([86, 31, 4], [220, 88, 50]),       # blue
        ([25, 146, 190], [62, 174, 250]),   # yellowish
        ([103, 86, 65], [145, 133, 128])    # grey
    ]
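    # NOTE: cv2.imread returns BGR images, so each (lower, upper) bound above is a BGR triple.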
    colorValues = []
    # loop over the boundaries
    for i, (lower, upper) in enumerate(boundaries):
        # create NumPy arrays from the boundaries
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")
        # find the colors within the specified boundaries and apply the mask
        mask = cv2.inRange(image, lower, upper)
        # print(cl.label(image, mask))
        # save the number of pixels that fall inside this colour range
        colorValues.append(np.count_nonzero(mask))
        # print(np.count_nonzero(mask))
        output = cv2.bitwise_and(image, image, mask=mask)
        # save the image and the masked output side by side
        # cv2.imshow("images", np.hstack([image, output]))
        cv2.imwrite('camera_image_color_{}.jpeg'.format(i), np.hstack([image, output]))
        # cv2.waitKey(0)
    print(getColor(colorValues, ['RED', 'BLUE', 'YELLOW(ish)', 'GREY'], max(colorValues)))

segment('camera_image.jpeg')
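A hedged sketch of exercising the colour counting on its own; the test image name is an assumption and the red range is copied from the boundaries above:

import cv2
import numpy as np

img = cv2.imread('camera_image.jpeg')  # assumed test image
lower = np.array([17, 15, 100], dtype="uint8")   # red range from above
upper = np.array([255, 56, 200], dtype="uint8")
mask = cv2.inRange(img, lower, upper)
print('pixels in red range:', np.count_nonzero(mask))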
@@ -22,7 +22,7 @@ def detecting(image):
    resized = imutils.resize(image, width=300)
    ratio = image.shape[0] / float(resized.shape[0])
    cv2.imwrite('resized.jpg', resized)
    # convert the resized image to grayscale, blur it slightly,
    # and threshold it
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
@@ -34,7 +34,7 @@ def detecting(image):
    # shape detector
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    sd = ShapeDetector()
@@ -42,17 +42,19 @@ def detecting(image):
    for c in cnts:
        # compute the center of the contour, then detect the name of the
        # shape using only the contour
        # c = cnts[0]
        M = cv2.moments(c)
-        if(M["m00"] > 3000):
+        if(M["m00"] > 2000):
            cX = int((M["m10"] / (M["m00"])) * ratio)
            cY = int((M["m01"] / (M["m00"])) * ratio)
            # print(M)
-            shape = sd.detect(c)
+            shape = sd.detect(c).upper()
+            if shape == 'RECTANGLE' or shape == 'SQUARE' or shape == 'PENTAGON':
+                shape = 'CUBE'
            # multiply the contour (x, y)-coordinates by the resize ratio,
            # then draw the contours and the name of the shape on the image
            c = c.astype("float")
@@ -63,9 +65,7 @@ def detecting(image):
                        0.5, (255, 255, 255), 2)
    # show the output image
-    # cv2.imshow("Image", image)
-    # cv2.waitKey(0)
    # cv2.imshow("Image", image)
    # cv2.waitKey(0)
    return [shape, c, M["m00"]]
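A hedged usage sketch for detecting(); the image path is hypothetical and the function is assumed to be importable from this module:

import cv2

img = cv2.imread('block.jpg')  # hypothetical test image
shape, contour, m00 = detecting(img)  # returns [shape, contour, m00]
print(shape, m00)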
@@ -42,7 +42,7 @@ def getColor(colorValues, colorNames, maxValue):
            return colorNames[i]

def getCroppedImage(image):
    # im1 = cv2.imread(image)
    # image1 = imread("/path/to/image1")
    # image2 = imread("/path/to/image2")
@@ -53,9 +53,8 @@ def getCroppedImage(image):
    # NOTE: it's img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
-    crop_img = img[300:550, 300:550]
+    crop_img = img[400:650, 300:550]
    # cv2.imshow("cropped", crop_img)
    cv2.imwrite("result.jpg", crop_img)
    # cv2.waitKey(0)
    # cv2.imshow('frame', img)
    # cv2.waitKey(0)
@@ -96,15 +95,13 @@ def getFeatures(image):
    size = shape = color = ""
    copy = image
    img = getCroppedImage(image)
    img = getNegative(img)
    # find the object
    detection = detecting(img)
    shape = detection[0]
    if(detection[2] > 7000):
        size = "BIG"
    elif(detection[2] > 5000):
@@ -114,8 +111,8 @@ def getFeatures(image):
    img = getCroppedImage(copy)
    # img = cv2.imread(path)
    # getObjectFromImage('background.jpg', path)
@@ -153,7 +150,7 @@ def getFeatures(image):
    # fname = path.split('/')[-1]
    # cv2.imwrite('output/' + fname, img)
    # cv2.imshow("Image", img)
    # cv2.waitKey(0)
@@ -165,10 +162,10 @@ def getFeatures(image):
    # load the image
    image = img  # cv2.imread(args["image"])
    # define the list of boundaries
    # BGR
    boundaries = [
@@ -196,7 +193,7 @@ def getFeatures(image):
        # print(np.count_nonzero(mask))
        output = cv2.bitwise_and(image, image, mask=mask)
        # show the images
        # cv2.imshow("images", np.hstack([image, output]))
        # cv2.waitKey(0)
@@ -207,8 +204,5 @@ def getFeatures(image):
-image = cv2.imread('sphere3.jpg')
-print(getFeatures(image))
+# image = cv2.imread('sphere3.jpg')
+# print(getFeatures(image))
@@ -3,6 +3,7 @@ import rospy
from moveit_commander import PlanningSceneInterface
from geometry_msgs.msg import PoseStamped
from gazebo_msgs.srv import SpawnModel, DeleteModel
+import uuid

class Scene(PlanningSceneInterface):
@@ -30,19 +31,20 @@ class Scene(PlanningSceneInterface):
        pose.pose.position.z = 0.0  # gazebo reference is not the same as moveit
        self.spawn('cafe_table', pose.pose)
-    def set_item(self):
+    def set_item(self, color='red'):
        pose = PoseStamped()
        pose.header.frame_id = self.frame
        pose.pose.position.x = 0.75
        pose.pose.position.y = 0.0
        pose.pose.position.z = self.floor + self.table_height/2 + self.item_height/2
-        self.add_box('block', pose, (0.04, 0.04, self.item_height))
+        name = 'block{}'.format(uuid.uuid4())
+        self.add_box(name, pose, (0.04, 0.04, self.item_height))
        pose.pose.position.x = 0.735
        pose.pose.position.y = -0.025
        pose.pose.position.z = self.table_height  # gazebo reference is not the same as moveit
-        self.spawn('block', pose.pose, type='urdf')
+        self.spawn('block_' + color, pose.pose, model_name=name, type='urdf')

    def spawn(self, model, pose, model_name=None, type='sdf'):
        if model_name is None:
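A hedged usage sketch for the updated set_item; Scene's constructor arguments are not shown in this diff, so the bare Scene() call is an assumption:

import sys
import rospy
import moveit_commander
from planning.scene import Scene

moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('scene_demo', anonymous=True)
scene = Scene()               # assumed default construction
scene.set_item()              # spawns block_red.urdf under a unique 'block<uuid>' name
scene.set_item(color='blue')  # spawns block_blue.urdf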
......