Commit ecb98207 authored by Mathieu Reymond's avatar Mathieu Reymond
Browse files

interact with buttons to train baxter

parent 19620bea
......@@ -7,8 +7,6 @@ from planning.scene import Scene
#Baxter and ROS imports
import baxter_interface
import rospy
from baxter_core_msgs.msgs import DigitalIOState, ITBState
from baxter_examples import xdisplay_image
class BaxterAgent(object):
"""
......@@ -21,9 +19,13 @@ class BaxterAgent(object):
self.scene = Scene(self.baxter.get_planning_frame())
rospy.sleep(2)
self.init_scene()
self.baxter.both.move(self.baxter.both.neutral)
self.is_train = config['is_train']
self.navs {'left': baxter_interface.Navigator('left')
'right': baxter_interface.Navigator('right')}
if self.is_train:
self.tree = DecisionTreeGR()
self.train()
......@@ -31,31 +33,33 @@ class BaxterAgent(object):
self.tree = self.load_decision_tree()
self.run()
def _train_sort_left(self, msg):
    """Navigator-button callback: on a truthy press, sort the current object to the left."""
    if not msg:
        return
    self.baxter.sort('left')
def _train_sort_right(self, msg):
    """Navigator-button callback: on a truthy press, sort the current object to the right."""
    if not msg:
        return
    self.baxter.sort('right')
def _train_cancel(self, msg):
    """Navigator-button callback: a truthy press ends the training loop."""
    if not msg:
        return
    self.is_train = False
def train(self):
    """Interactive training loop driven by Baxter's physical buttons.

    Shows the training UI, wires the arm navigator buttons to the sort /
    cancel callbacks, then polls the shoulder buttons and navigators until
    training is cancelled; finally returns to the default pose.

    NOTE(review): this block mixes two input mechanisms -- callback wiring
    via Navigator.button*_changed.connect() AND explicit
    rospy.wait_for_message polling in the loop. It appears to be
    mid-refactor (diff residue); confirm which path is actually live.
    """
    self.drawTrainingUI()
    # Button 0 on either arm's navigator sorts to that side;
    # button 1 on either arm cancels training.
    self.navs['left'].button0_changed.connect(self._train_sort_left)
    self.navs['right'].button0_changed.connect(self._train_sort_right)
    self.navs['left'].button1_changed.connect(self._train_cancel)
    self.navs['right'].button1_changed.connect(self._train_cancel)
    while self.is_train:
        ######
        #TODO# take snapshot and extract features if object was added to the scene. Update scene accordingly, send image to baxter face
        ######
        ######
        features = {}
        # Poll the physical shoulder buttons (3 s timeout each).
        right_button = rospy.wait_for_message('/robot/digital_io/right_shoulder_button/state', DigitalIOState, 3)
        left_button = rospy.wait_for_message('/robot/digital_io/left_shoulder_button/state', DigitalIOState, 3)
        #Use navigator cancel on left or right arm to stop training
        left_nav = rospy.wait_for_message('/robot/navigators/left_itb/state', ITBState, 3)
        right_nav = rospy.wait_for_message('/robot/navigators/right_itb/state', ITBState, 3)
        if right_button.state == 1:
            features['direction'] = 'right'
            self.sort_right()
            self.tree.add_training_sample(features)
        elif left_button.state == 1:
            features['direction'] = 'left'
            self.sort_left()
            self.tree.add_training_sample(features)
        elif left_nav.buttons[1] or right_nav.buttons[1]:
            self.is_train = False
    self.init_move() #Go back to default position
    self.tree.train()
    self.save_decision_tree() #Store trained decision tree
    print('done training')
    # NOTE(review): duplicated, commented-out copies of the two calls just
    # above -- looks like old/new lines merged by the diff view.
    #self.tree.train()
    #self.save_decision_tree() #Store trained decision tree
def run(self):
self.drawProductionUI()
......@@ -97,35 +101,8 @@ class BaxterAgent(object):
def drawTrainingUI(self):
    """Announce training mode to the operator on the console and Baxter's screen."""
    # Function-call form of print works under both Python 2 and 3; the
    # bare `print '...'` statement form used here before is Python-2 only,
    # and the rest of the file already uses print(...) calls.
    print('Garbagebot now training. Please choose a side to sort after putting down an object')
    # NOTE(review): the xdisplay_image import is being removed elsewhere in
    # this commit -- confirm this call is still wanted, and replace the
    # placeholder path with a real image path.
    xdisplay_image.send_image('path/to/initial/image')
def drawProductionUI(self):
    """Placeholder for the production-mode UI (not yet implemented)."""
    # A body consisting only of a comment is a syntax error in Python;
    # `pass` makes the stub valid until the UI is written.
    #TODO#
    pass
def sort_right(self):
    """Pick up the object in front of Baxter and release it with the right arm."""
    arm = self.baxter.right
    arm.move(arm.pick)
    arm.gripper.open()
    rospy.sleep(3)          # give the operator/scene time to settle
    arm.move(arm.grasp)
    rospy.sleep(1)
    arm.gripper.close()
    rospy.sleep(1)
    arm.move(arm.pick)
    arm.move(arm.neutral)
    arm.gripper.open()
def sort_left(self):
    """Pick up the object in front of Baxter and release it with the left arm."""
    arm = self.baxter.left
    arm.move(arm.pick)
    arm.gripper.open()
    rospy.sleep(3)          # give the operator/scene time to settle
    arm.move(arm.grasp)
    rospy.sleep(1)
    arm.gripper.close()
    rospy.sleep(1)
    arm.move(arm.pick)
    arm.move(arm.neutral)
    arm.gripper.open()
pass
import numpy as np
import cv2
import argparse
def getSobel(channel):
    """Return the Sobel gradient-magnitude image of a single channel."""
    grad_x = cv2.Sobel(channel, cv2.CV_16S, 1, 0, borderType=cv2.BORDER_REPLICATE)
    grad_y = cv2.Sobel(channel, cv2.CV_16S, 0, 1, borderType=cv2.BORDER_REPLICATE)
    return np.hypot(grad_x, grad_y)
def findSignificantContours(img, sobel_8u):
    """Find top-level contours covering at least 5% of the image area.

    Draws each significant contour onto `img` (in-place side effect) and
    returns the contours sorted by ascending area.
    """
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 2 and 4 return
    # (contours, hierarchy). Taking the last two elements works everywhere,
    # so this no longer breaks on OpenCV 4.
    found = cv2.findContours(sobel_8u, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = found[-2:]
    # Keep only level-1 contours. Each hierarchy row is
    # (Next, Prev, First child, Parent); Parent == -1 means no parent.
    level1 = []
    for i, row in enumerate(hierarchy[0]):
        if row[3] == -1:
            # Prepend the contour's own index so we can look it up later.
            level1.append(np.insert(row, 0, [i]))
    # If a contour isn't covering 5% of the total image area it is
    # probably too small to be the object.
    tooSmall = sobel_8u.size * 5 / 100
    significant = []
    for row in level1:
        contour = contours[row[0]]
        area = cv2.contourArea(contour)
        if area > tooSmall:
            cv2.drawContours(img, [contour], 0, (0, 255, 0), 2, cv2.LINE_AA, maxLevel=1)
            significant.append([contour, area])
    # Ascending by area, as in the original implementation.
    significant.sort(key=lambda entry: entry[1])
    return [entry[0] for entry in significant]
def getColor(colorValues, colorNames, maxValue):
    """Return the name paired with the first value equal to `maxValue`.

    Falls through (returning None) when no value matches.
    """
    for index, value in enumerate(colorValues):
        if value == maxValue:
            return colorNames[index]
def segment(path):
    """Segment the object out of the image at `path` and report its color.

    Pipeline: blur -> per-channel Sobel edges -> threshold -> significant
    contours -> polygon mask -> background blanked to black. Writes debug
    images ('output/edge.png', 'output/<fname>',
    'camera_image_no_background.jpeg', 'camera_image_color_<i>.jpeg') and
    prints the dominant color name among four hard-coded BGR ranges.
    """
    img = cv2.imread(path)
    blurred = cv2.GaussianBlur(img, (5, 5), 0) # Remove noise
    # Edge operator: per-channel Sobel magnitude, max across channels.
    sobel = np.max( np.array([ getSobel(blurred[:,:, 0]), getSobel(blurred[:,:, 1]), getSobel(blurred[:,:, 2]) ]), axis=0 )
    mean = np.median(sobel)  # NOTE(review): named `mean` but computed with np.median
    # Zero any values less than mean. This reduces a lot of noise.
    sobel[sobel <= mean] = 0;
    sobel[sobel > 255] = 255;
    cv2.imwrite('output/edge.png', sobel);
    sobel_8u = np.asarray(sobel, np.uint8)
    # Find contours (also draws them onto img as a side effect).
    significant = findSignificantContours(img, sobel_8u)
    # Mask: start from the edge image, clear it, then fill the contours.
    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, significant, 255)
    # Invert mask so True marks background pixels.
    mask = np.logical_not(mask)
    #Finally remove the background
    img[mask] = 0;
    fname = path.split('/')[-1]
    cv2.imwrite('output/' + fname, img);
    print (path)
    # cv2.imshow("Image", img)
    cv2.imwrite('camera_image_no_background.jpeg', img)
    # cv2.waitKey(0)
    # construct the argument parse and parse the arguments
    # NOTE(review): this argparse block is dead weight inside segment() --
    # `image` is taken from the in-memory `img`, not from args; presumably
    # pasted from a standalone script. Confirm before relying on --image.
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", help = "path to the image")
    args = vars(ap.parse_args())
    # load the image
    image = img # cv2.imread(args["image"])
    # define the list of boundaries (BGR lower/upper pairs -- TODO confirm
    # channel order matches cv2.imread's BGR convention)
    boundaries = [
        ([17, 15, 100], [255, 56, 200]), #red
        ([86, 31, 4], [220, 88, 50]), #blue
        ([25, 146, 190], [62, 174, 250]), #yellowish
        ([103, 86, 65], [145, 133, 128]) #grey
    ]
    colorValues = []
    # loop over the boundaries
    for i, (lower, upper) in enumerate(boundaries):
        # create NumPy arrays from the boundaries
        lower = np.array(lower, dtype = "uint8")
        upper = np.array(upper, dtype = "uint8")
        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(image, lower, upper)
        #print(cl.label(image, mask))
        # save the values (count of pixels inside this color range)
        colorValues.append(np.count_nonzero(mask))
        #print(np.count_nonzero(mask))
        output = cv2.bitwise_and(image, image, mask = mask)
        # show the images
        # cv2.imshow("images", np.hstack([image, output]))
        cv2.imwrite('camera_image_color_{}.jpeg'.format(i), np.hstack([image, output]))
        # cv2.waitKey(0)
    # Report the color range with the most matching pixels.
    print(getColor(colorValues, ['RED', 'BLUE', 'YELLOW(ish)', 'GREY'], max(colorValues)))
# Script entry point: segment the most recent camera capture.
segment('camera_image.jpeg')
#!/usr/bin/python
import moveit_commander
import rospy
import sys
from baxter_agent import BaxterAgent
from baxter_agent import BaxterAgent
config = {
'is_train': True,
......@@ -19,4 +20,3 @@ def main():
if __name__ == '__main__':
main()
......@@ -118,3 +118,20 @@ class Baxter(object):
def get_planning_frame(self):
    """Expose the planning frame reported by the underlying commander."""
    frame = self._commander.get_planning_frame()
    return frame
def sort(self, arm):
    """Pick up the object in front of Baxter and release it to one side.

    arm: 'left' sorts with the left limb; any other value uses the right.
    """
    # `==`, not `is`: identity comparison against a string literal only
    # works by CPython interning accident and raises SyntaxWarning on
    # modern interpreters.
    limb = self.left if arm == 'left' else self.right
    limb.move(limb.pick)
    limb.gripper.open()
    rospy.sleep(3)      # give the operator/scene time to settle
    limb.move(limb.grasp)
    rospy.sleep(1)
    # Open/close must go through the gripper object, matching the
    # `gripper.open()` call above and the sort_left/sort_right methods;
    # the bare `arm.close()` / `arm.open()` calls were inconsistent.
    limb.gripper.close()
    rospy.sleep(1)
    limb.move(limb.pick)
    limb.move(limb.neutral)
    limb.gripper.open()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment