Commit 9b60212a authored by fouad5's avatar fouad5
Browse files

merge

parents 0b13d313 9ff9b6a5
<!-- URDF model of the blue sorting block: one cube link plus Gazebo
     material and friction settings. -->
<robot name="block_blue">
<link name="block_blue">
<inertial>
<!-- origin at the cube centre: half of the 0.05 m edge on each axis -->
<origin xyz="0.025 0.025 0.025" />
<mass value="0.5" />
<!-- NOTE(review): a diagonal inertia of 0.5 is orders of magnitude larger
     than the physical value for a 0.5 kg, 0.05 m cube (~2e-4); presumably
     inflated for simulation stability - confirm intent -->
<inertia ixx="0.5" ixy="0.0" ixz="0.0" iyy="0.5" iyz="0.0" izz="0.5" />
</inertial>
<visual>
<origin xyz="0.025 0.025 0.025"/>
<geometry>
<box size="0.05 0.05 0.05" />
</geometry>
</visual>
<!-- collision geometry mirrors the visual cube exactly -->
<collision>
<origin xyz="0.025 0.025 0.025"/>
<geometry>
<box size="0.05 0.05 0.05" />
</geometry>
</collision>
</link>
<gazebo reference="block_blue">
<material>Gazebo/Blue</material>
<!-- very high friction so the gripper can hold the block without slipping -->
<mu1>1000</mu1>
<mu2>1000</mu2>
</gazebo>
</robot>
<robot name="block">
<link name="block">
<robot name="block_red">
<link name="block_red">
<inertial>
<origin xyz="0.025 0.025 0.025" />
<mass value="0.5" />
......@@ -18,7 +18,7 @@
</geometry>
</collision>
</link>
<gazebo reference="block">
<gazebo reference="block_red">
<material>Gazebo/Red</material>
<mu1>1000</mu1>
<mu2>1000</mu2>
......
#Learning imports
import features_detection as feat
from features_detection.recognizer import getFeatures
from decision_api.decision_tree_gr import DecisionTreeGR
from planning.baxter import Baxter
from planning.scene import Scene
import cv2
#Baxter and ROS imports
import baxter_interface
import rospy
import time
class BaxterAgent(object):
"""
......@@ -24,61 +25,89 @@ class BaxterAgent(object):
self.is_train = config['is_train']
self.navs = {'left': baxter_interface.Navigator('left'),
'right': baxter_interface.Navigator('right')}
'right': baxter_interface.Navigator('right'),
'torso_left': baxter_interface.Navigator('torso_left'),
'torso_right': baxter_interface.Navigator('torso_right')}
if self.is_train:
self.tree = DecisionTreeGR()
self.train()
else :
self.tree = self.load_decision_tree()
self.run()
def _train_sort_left(self, msg):
    """Navigator-button callback: sort the current object to the left bin.

    msg is truthy only on an actual button press; release events (falsy)
    are ignored.
    """
    if not msg:
        return
    self.baxter.sort('left')
def extract_features(self, img):
    """Run the vision pipeline on a camera frame and return its feature dict."""
    return getFeatures(img)
# NOTE(review): this span is unresolved merge residue - the pre-merge
# handlers (_train_sort_right / _train_cancel) are interleaved with the
# merged _train_sort(arm_name, msg). The duplicate def lines below cannot
# all be valid at once; reconcile against the repository before use.
def _train_sort_right(self, msg):
def _train_sort(self, arm_name, msg):
# resolve the Arm wrapper ('left' or 'right') on the Baxter planner object
arm = getattr(self.baxter, arm_name)
if msg:
# pre-merge body of _train_sort_right:
self.baxter.sort('right')
# pre-merge def of the cancel handler (superseded by _train_end):
def _train_cancel(self, msg):
# merged training flow: move the arm over the table, photograph the
# object, extract + label its features, then physically sort it.
arm.move(arm.top)
rospy.sleep(2)
img = arm.take_snapshot()
try:
f = self.extract_features(img)
# label the sample with the arm used, e.g. 'LEFT' / 'RIGHT'
f['direction'] = arm_name.upper()
print(f)
self.tree.add_training_sample(f)
self.baxter.sort(arm_name)
except:
# NOTE(review): bare except hides any failure, not just missing
# features - consider narrowing once the file is restored
print('no features found')
arm.move(arm.neutral)
def _train_end(self, msg):
    """Navigator-button callback: finish training mode.

    On a button press (truthy msg) this clears the is_train flag (ending
    the training loop), fits the decision tree on the collected samples,
    and persists it to disk. Release events (falsy msg) are ignored.
    """
    if not msg:
        return
    self.is_train = False
    try:
        self.tree.train()
        self.save_decision_tree()
    except Exception as e:
        # was a bare `except:` that swallowed everything (including
        # KeyboardInterrupt/SystemExit) with no diagnostics; keep the
        # best-effort behaviour but report what actually failed
        print('could not train features: {}'.format(e))
# Training mode: wire the arm navigator buttons to the training handlers
# and block until the operator presses the end button.
def train(self):
self.drawTrainingUI()
# NOTE(review): merge residue - the next four connects are the pre-merge
# per-side wiring and are superseded by the lambda wiring just below.
self.navs['left'].button0_changed.connect(self._train_sort_left)
self.navs['right'].button0_changed.connect(self._train_sort_right)
self.navs['left'].button1_changed.connect(self._train_cancel)
self.navs['right'].button1_changed.connect(self._train_cancel)
# merged wiring: route both navigators' button0 through the shared
# _train_sort(arm_name, msg) handler, button1 through _train_end
train_left = lambda m: self._train_sort('left', m)
train_right = lambda m: self._train_sort('right', m)
self.navs['left'].button0_changed.connect(train_left)
self.navs['right'].button0_changed.connect(train_right)
self.navs['left'].button1_changed.connect(self._train_end)
self.navs['right'].button1_changed.connect(self._train_end)
# blocking call until end button is clicked
while self.is_train:
######
#TODO# take snapshot and extract features if object was added to the scene. Update scene accordingly, send image to baxte face
######
# NOTE(review): merge residue - `features = {}` is the pre-merge loop
# body and is dead either way; the merged loop is a plain busy-wait.
features = {}
pass
print('done training')
#self.tree.train()
#self.save_decision_tree() #Store trained decision tree
# Torso-navigator callback: photograph the object with the left hand camera,
# classify it with the trained tree, and sort it to the predicted side.
def _sort(self, msg):
# NOTE(review): indentation was lost in this dump; the nesting below is
# inferred and must be confirmed against the repository.
if msg:
self.baxter.left.move(self.baxter.left.top)
img = self.baxter.left.take_snapshot()
try:
features = self.extract_features(img)
# keep the raw frame on disk for offline debugging of misclassifications
cv2.imwrite('tosort.jpg', img)
print(features)
# classifier emits 'LEFT'/'RIGHT'; the planner expects lowercase
direction = self.tree.classify(features).lower()
print(direction)
# the left arm is already hovering over the object; retract it first
# only when the other arm has to do the pick
if direction != 'left':
self.baxter.left.move(self.baxter.left.neutral)
self.baxter.sort(direction)
except Exception as e:
# best-effort: report vision/classification failures and fall through
print(e)
self.baxter.both.move(self.baxter.both.neutral)
def _run_end(self, m):
    """Navigator-button callback: leave the production sorting loop.

    A truthy message (button press) clears the is_run flag; release
    events (falsy) are ignored.
    """
    if not m:
        return
    self.is_run = False
# Production mode: wire the torso navigator buttons to the sort/end
# handlers and block until the end button is pressed.
def run(self):
self.drawProductionUI()
# NOTE(review): merge residue - the polling loop below (local `run` flag +
# rospy.wait_for_message on the navigator topics; note ITBState is not
# imported in this view) is the pre-merge implementation, superseded by
# the callback-based wiring that follows it.
run = True
while run:
#TODO# Check if object on table and retrieve features
features = {}
#Listen to navigators for both sides
left_nav = rospy.wait_for_message('/robot/navigators/left_itb/state', ITBState, 3)
right_nav = rospy.wait_for_message('/robot/navigators/right_itb/state', ITBState, 3)
if left_nav.buttons[0] or right_nav.buttons[0]:
side = self.tree.classify(features) #Choose sorting side
if side == 'RIGHT':
self.sort_right()
elif side == 'LEFT':
self.sort_left()
elif left_nav.buttons[1] or right_nav.buttons[1]:
run = False
# merged implementation: move to the start pose, then event-driven
# sorting via the torso navigator buttons
self.init_move()
self.is_run = True
self.navs['torso_left'].button0_changed.connect(self._sort)
self.navs['torso_right'].button0_changed.connect(self._sort)
self.navs['torso_left'].button1_changed.connect(self._run_end)
self.navs['torso_right'].button1_changed.connect(self._run_end)
# blocking call until end button is clicked
while self.is_run:
pass
print('done')
def load_decision_tree(self):
......
......@@ -28,7 +28,7 @@ BIG = 2
SHAPE = 'shape'
CUBE_STR = 'CUBE'
SPHERE_STR = 'SPEHRE'
SPHERE_STR = 'SPHERE'
TRIANGLE_STR = 'TRIANGLE'
CUBE = 0
SPHERE = 1
......@@ -52,7 +52,7 @@ map_to_int = {
class DecisionTreeGR(object):
"""Wrapper class for the SciKit Learn decision tree implementation."""
def __init__(self, training_samples=None):
def __init__(self, training_samples=None, features=[COLOR, SIZE, SHAPE]):
"""Constructor.
Constructor method for the Decision Tree wraper class. If provided a
......@@ -60,6 +60,7 @@ class DecisionTreeGR(object):
"""
self.__decision_tree = DecisionTreeClassifier(criterion='gini')
self.__training_samples = training_samples
self._features = features
if not self.__training_samples:
self.__training_samples = []
......@@ -89,9 +90,8 @@ class DecisionTreeGR(object):
"""
df = pd.DataFrame(self.__training_samples)
df = self.__map_string_to_int(df)
features = [COLOR, SIZE, SHAPE]
y = df[DIRECTION]
X = df[features]
X = df[self._features]
self.__decision_tree.fit(X, y)
def __map_string_to_int(self, data_frame):
......@@ -118,7 +118,6 @@ class DecisionTreeGR(object):
Make sure that the tree is trained before calling this method (Call
train method before)
"""
input = {
key: [map_to_int[value]]
for (key, value) in input.iteritems()
......@@ -130,9 +129,11 @@ class DecisionTreeGR(object):
return LEFT_STR
def save(self, directory):
    """Serialize the wrapped sklearn tree to <directory>decision_tree.pkl.

    *directory* is expected to end with a path separator, as it is joined
    to the filename by plain string concatenation.
    """
    target = directory + "decision_tree.pkl"
    joblib.dump(self.__decision_tree, target)
def load(self, directory):
    """Load the pickled decision tree from *directory* and return it.

    *directory* is expected to end with a path separator, matching save().
    """
    # the filename must match what save() writes ("decision_tree.pkl";
    # it was misspelled "descision_tree.pkl", so load() could never find
    # a saved tree), and the loaded estimator must actually be kept
    # rather than discarded
    self.__decision_tree = joblib.load(directory + "decision_tree.pkl")
    return self.__decision_tree
# USAGE EXAMPLE
......
#!/usr/bin/python
import moveit_commander
import rospy
import sys
from baxter_agent import BaxterAgent
from planning.scene import Scene
config = {
'is_train': True,
'tree_dir': './saved_trees/',
'scene': 'scene description' #Should contain a description of the scene to be loaded
}
def add_item(scene, color, msg):
    """Navigator callback: register a block of *color* in the planning scene.

    msg is truthy on a button press; release events (falsy) are ignored.
    """
    if not msg:
        return
    scene.set_item(color=color)
def main():
    """Entry point: bring up MoveIt/ROS, then run Baxter training and sorting.

    Also wires both navigators' button2 to register a new blue/red block
    in the planning scene.
    """
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('garbage_bot', anonymous=True)
    agent = BaxterAgent(config)

    def on_blue(msg):
        add_item(agent.scene, 'blue', msg)

    def on_red(msg):
        add_item(agent.scene, 'red', msg)

    agent.navs['left'].button2_changed.connect(on_blue)
    agent.navs['right'].button2_changed.connect(on_red)
    agent.train()
    agent.run()
if __name__ == '__main__':
main()
# USAGE: import detecting() and pass it a BGR image array.
# The original CLI invocation (python detect_shapes.py --image
# shapes_and_colors.png) no longer applies: the argparse block below
# is commented out.
# import the necessary packages
from shapedetector import ShapeDetector
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required=True,
# help="path to the input image")
#args = vars(ap.parse_args())
# load the image and resize it to a smaller factor so that
# the shapes can be approximated better
def detecting(image):
    """Detect the dominant shape in a BGR camera image.

    Resizes the image for cleaner contour approximation, thresholds it,
    finds external contours, and classifies each sufficiently large one
    with ShapeDetector. The annotated contour and label are drawn onto
    *image* (mutated in place).

    Returns:
        [shape, contour, area] for the last contour whose raw moment area
        (m00) exceeds 2000, where shape is an upper-case name with
        RECTANGLE/SQUARE/PENTAGON collapsed to 'CUBE', contour is scaled
        back to the original image coordinates, and area is that
        contour's m00.

    Raises:
        ValueError: if no contour passes the area threshold. (The
        original fell off the end with `shape`/`c` unbound - a NameError -
        and could also pair the kept shape/contour with the m00 of a
        *different*, smaller final contour; both defects are fixed by
        recording all three values together per qualifying contour.)
    """
    resized = imutils.resize(image, width=300)
    ratio = image.shape[0] / float(resized.shape[0])
    cv2.imwrite('rezised.jpg', resized)  # debug dump (filename typo kept for compatibility)

    # grayscale -> blur -> binary threshold to isolate the object
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 10, 200, cv2.THRESH_BINARY)[1]

    # findContours' return tuple differs between OpenCV 2 and 3
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    sd = ShapeDetector()

    result = None
    for c in cnts:
        M = cv2.moments(c)
        # skip noise: only contours with a substantial raw area
        if M["m00"] > 2000:
            # contour centroid, mapped back to original image coordinates
            cX = int((M["m10"] / M["m00"]) * ratio)
            cY = int((M["m01"] / M["m00"]) * ratio)
            shape = sd.detect(c).upper()
            # all flat polygons the detector reports are cubes in this scene
            if shape in ('RECTANGLE', 'SQUARE', 'PENTAGON'):
                shape = 'CUBE'
            # rescale the contour to the original resolution and annotate
            c = c.astype("float")
            c *= ratio
            c = c.astype("int")
            cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
            cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255, 255, 255), 2)
            # keep shape, contour and area together so they always refer
            # to the same contour
            result = [shape, c, M["m00"]]

    if result is None:
        raise ValueError('no significant contour found in image')
    return result
import numpy as np
import cv2
import argparse
from detect_shapes import detecting
def getSobel (channel):
......@@ -40,8 +41,81 @@ def getColor(colorValues, colorNames, maxValue):
if(item == maxValue):
return colorNames[i]
def segment (path):
img = cv2.imread(path)
def getCroppedImage(image):
    """Return the fixed region of interest of a camera frame.

    Crops rows 400:650 and columns 300:550 (numpy indexing is
    [y:y+h, x:x+w]), i.e. a 250x250 window hard-coded for the robot's
    camera mounting. Returns a view, not a copy.
    """
    region = image[400:650, 300:550]
    return region
# gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
# fgbg = cv2.createBackgroundSubtractorGMG()
# fgmask = fgbg.apply(im1)
# fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
# cv2.imshow('frame',fgmask)
# cv2.imshow("Image", img)
# cv2.waitKey(0)
# imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
# ret,thresh = cv2.threshold(imgray,127,255,0)
# contours, hierarchy, _ = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# ctr = np.array(contours).reshape((-1,1,2)).astype(np.int32)
# cv2.drawContours(im1, [ctr], 0, (0,255,0), -1)
# cv2.imwrite("result.jpg", im1)
# cv2.waitKey(0)
def getNegative(img):
    """Return the absolute difference between *img* and the background crop.

    background.jpg is a reference shot of the empty table; its crop is
    250x250 px and must line up with the region the caller passes in.
    """
    background = cv2.imread('background.jpg')[300:550, 300:550]
    return cv2.absdiff(background, img)
def getFeatures (image):
size = shape = color = ""
copy = image
img = getCroppedImage(image)
img = getNegative(img)
# find object
detection = detecting(img)
shape = detection[0]
if(detection[2] > 7000):
size = "BIG"
elif(detection[2] > 5000):
size = "MEDIUM"
else:
size = "SMALL"
img = getCroppedImage(copy)
# img = cv2.imread(path)
# getObjectFromImage('background.jpg', path)
blurred = cv2.GaussianBlur(img, (5, 5), 0) # Remove noise
......@@ -61,24 +135,25 @@ def segment (path):
# Find contours
significant = findSignificantContours(img, sobel_8u)
# print("significant: ", significant)
# print("cnts: ", [shape[1]])
# Mask
mask = sobel.copy()
mask[mask > 0] = 0
cv2.fillPoly(mask, significant, 255)
cv2.fillPoly(mask, [detection[1]], 255)
# Invert mask
mask = np.logical_not(mask)
#Finally remove the background
img[mask] = 0;
fname = path.split('/')[-1]
cv2.imwrite('output/' + fname, img);
print (path)
# fname = path.split('/')[-1]
# cv2.imwrite('output/' + fname, img);
# cv2.imshow("Image", img)
cv2.imwrite('camera_image_no_background.jpeg', img)
# cv2.waitKey(0)
#cv2.imshow("Image", img)
# cv2.waitKey(0)
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
......@@ -88,40 +163,46 @@ def segment (path):
# load the image
image = img # cv2.imread(args["image"])
# define the list of boundaries
# BGR
boundaries = [
([17, 15, 100], [255, 56, 200]), #red
([86, 31, 4], [220, 88, 50]), #blue
([25, 146, 190], [62, 174, 250]), #yellowish
([0, 0, 140], [140, 140, 255]), #red
([140, 0, 0], [255, 140, 140]), #blue
([0, 140, 0], [140, 255, 140]), #green
([103, 86, 65], [145, 133, 128]) #grey
]
colorValues = []
# loop over the boundaries
for i, (lower, upper) in enumerate(boundaries):
for (lower, upper) in boundaries:
# create NumPy arrays from the boundaries
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(image, lower, upper)
# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(image, lower, upper)
#print(cl.label(image, mask))
# save the values
colorValues.append(np.count_nonzero(mask))
#print(np.count_nonzero(mask))
output = cv2.bitwise_and(image, image, mask = mask)
output = cv2.bitwise_and(image, image, mask = mask)
# show the images
#cv2.imshow("images", np.hstack([image, output]))
#cv2.waitKey(0)
# show the images
# cv2.imshow("images", np.hstack([image, output]))
cv2.imwrite('camera_image_color_{}.jpeg'.format(i), np.hstack([image, output]))
# cv2.waitKey(0)
color = getColor(colorValues, ['RED', 'BLUE', 'GREEN', 'GREY'], max(colorValues))
print(getColor(colorValues, ['RED', 'BLUE', 'YELLOW(ish)', 'GREY'], max(colorValues)))
return {'shape': shape, 'color': color, 'size': size}
segment('camera_image.jpeg')
#image = cv2.imread('sphere3.jpg')
#print(getFeatures(image))
#!/usr/bin/python
import moveit_commander
import rospy
import sys
from planning.baxter import Baxter
from baxter_agent import BaxterAgent
from planning.scene import Scene
config = {
'is_train': True,
'tree_dir': './saved_trees/',
'scene': 'scene description' #Should contain a description of the scene to be loaded
}
def add_item(scene, color, msg):
    """On a navigator button press (truthy msg), add a *color* block to *scene*."""
    if not msg:
        return
    scene.set_item(color=color)
def main():
    """Entry point: bring up MoveIt/ROS, move the left arm over the table,
    and prepare the camera."""
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('garbage_bot', anonymous=True)
    robot = Baxter()
    robot.left.move(robot.left.top)
    robot.ready_camera()
if __name__ == '__main__':
main()
......@@ -14,8 +14,9 @@ config = {
def main():
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('garbage_bot', anonymous=True)
BaxterAgent(config)
b = BaxterAgent(config)
b.train()
b.run()
if __name__ == '__main__':
......
......@@ -12,12 +12,10 @@ import cv_bridge as cvb
import cv2
class Arm(moveit_commander.MoveGroupCommander):
def __init__(self, name, neutral=None, top=None, pick=None, grasp=None):
def __init__(self, name, **kwargs):
super(Arm, self).__init__(name)
self.neutral = neutral
self.pick = pick
self.top = top
self.grasp = grasp
for i,v in kwargs.items():
setattr(self, i