Python numpy module: int0() example source code
The following 31 code examples, collected from open-source Python projects, illustrate how to use numpy.int0().
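np.int0 is an alias of np.intp, the pointer-sized integer type NumPy uses for indexing; calling it on a float array truncates the values toward zero. (The alias was removed in NumPy 2.0, where np.intp should be used directly.) A minimal sketch of the conversion that most of the examples below apply to cv2.boxPoints() output:

import numpy as np

pts = np.array([[10.7, 20.2], [30.9, 40.1]])  # float corner coordinates
box = np.int0(pts)                            # truncate to integer pixel indices
print(box)        # [[10 20]
                  #  [30 40]]
print(box.dtype)  # intp (int64 on most 64-bit platforms)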
def img_contour_select(ctrs, im):
    # Select candidate rectangular contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02 * cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            if rect[1][0] < 20 or rect[1][1] < 20:
                continue
            if rect[1][0] > 150 or rect[1][1] > 150:
                continue
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0, 255, 0), 3)
            cand_rect.append(box)
    img_show_hook("candidate rects", im)
    return cand_rect
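A hypothetical driver for img_contour_select; the image path, thresholding choices, and the img_show_hook stub are assumptions, not part of the original project:

import cv2
import numpy as np

def img_show_hook(title, im):
    # assumed debug hook: just display the image
    cv2.imshow(title, im)
    cv2.waitKey(0)

im = cv2.imread('card.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# OpenCV 4.x returns (contours, hierarchy); 3.x returns three values
ctrs, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cand = img_contour_select(ctrs, im)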
def draw_markers(img, markers):
    for m in markers:
        centroid = np.array(m['centroid'], dtype=np.float32)
        origin = np.array(m['verts'][0], dtype=np.float32)
        hat = np.array([[[0, 0], [0, 1], [.5, 1.25], [1, 1], [1, 0]]], dtype=np.float32)
        hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
        if m['id_confidence'] > .9:
            cv2.polylines(img, np.int0(hat), color=(0, 0, 255), isClosed=True)
        else:
            cv2.polylines(img, np.int0(hat), color=(0, 255, 0), isClosed=True)
        # cv2.polylines(img, np.int0(centroid), color=(255, 255, int(255*m['id_confidence'])), isClosed=True, thickness=2)
        m_str = 'id: {:d}'.format(m['id'])
        org = origin.copy()
        # cv2.rectangle(img, tuple(np.int0(org+(-5,-13))[0,:]), tuple(np.int0(org+(100,30))[0,:]), color=(0,0,0), thickness=-1)
        cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'id_confidence' in m:
            m_str = 'idc: {:.3f}'.format(m['id_confidence'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'loc_confidence' in m:
            m_str = 'locc: {:.3f}'.format(m['loc_confidence'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'frames_since_true_detection' in m:
            m_str = 'otf: {}'.format(m['frames_since_true_detection'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'opf_vel' in m:
            m_str = 'opf: {}'.format(m['opf_vel'])  # label was 'otf', an apparent copy-paste slip
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
def img_contour_select(ctrs, im):
    # Select candidate rectangular contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02 * cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            # skip rectangles tilted between -80 and -10 degrees
            if rect[2] < -10 and rect[2] > -80:
                continue
            if rect[1][0] < 10 or rect[1][1] < 10:
                continue
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0, 255, 0), 3)
            cand_rect.append(box)
    img_show_hook("candidate rects", im)
    return cand_rect
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.boxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x, y, w, h = cv2.boundingRect(contour)  # boundingRect returns x, y, width, height
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 255, -1)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 0, 4)
    return np.minimum(c_im, ary)
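remove_border depends on a helper, angle_from_right, that the excerpt does not include; a plausible sketch, assuming it measures how far an angle is from the nearest multiple of 90 degrees:

def angle_from_right(deg):
    # distance (in degrees) from the nearest multiple of 90;
    # cv2.minAreaRect reports angles in (-90, 0] in older OpenCV builds
    return min(deg % 90, 90 - (deg % 90))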
def getMask(self, shape):
    p = self.state['pos']
    s = self.state['size']
    center = p + s / 2
    a = self.state['angle']
    # opencv convention:
    shape = (shape[1], shape[0])
    arr1 = np.zeros(shape, dtype=np.uint8)
    arr2 = np.zeros(shape, dtype=np.uint8)
    # draw rotated rectangle:
    vertices = np.int0(cv2.boxPoints((center, s, a)))
    cv2.drawContours(arr1, [vertices], 0, color=1, thickness=-1)
    # draw ellipse:
    cv2.ellipse(arr2, (int(center[0]), int(center[1])),
                (int(s[0] / 2 * self._ratioEllispeRectangle),
                 int(s[1] / 2 * self._ratioEllispeRectangle)), int(a),
                startAngle=0, endAngle=360, color=1, thickness=-1)
    # bring both together:
    return np.logical_and(arr1, arr2).T
def getMask(self, shape):
    p = self.state['pos']
    s = self.state['size']
    center = p + s / 2
    a = self.state['angle']
    # opencv convention:
    shape = (shape[1], shape[0])
    arr = np.zeros(shape, dtype=np.uint8)
    # draw rotated rectangle:
    vertices = np.int0(cv2.boxPoints((center, s, a)))
    cv2.drawContours(arr, [vertices], 0, color=1, thickness=-1)
    return arr.astype(bool).T
def deal(self, frame):
    frame = frame.copy()
    track_window = self.track_window
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    roi_hist = self.roi_hist
    dst = cv2.calcBackProject([frame], [0], roi_hist, [0, 180], 1)
    if self.m == 'm':
        ret, track_window_r = cv2.meanShift(dst, track_window, term_crit)
        x, y, w, h = track_window_r
        img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
    elif self.m == 'c':
        ret, track_window_r = cv2.CamShift(dst, track_window, term_crit)
        pts = cv2.boxPoints(ret)
        pts = np.int0(pts)
        img2 = cv2.polylines(frame, [pts], True, 255, 2)
    rectsNew = []
    center1 = (track_window[0] + track_window[2] // 2, track_window[1] + track_window[3] // 2)
    center2 = (track_window_r[0] + track_window_r[2] // 2, track_window_r[1] + track_window_r[3] // 2)
    img2 = cv2.line(img2, center1, center2, color=0)
    rectsNew = track_window_r
    # x, y, w, h = track_window
    # img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
    cv2.imshow('img2', img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return rectsNew
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)  # OpenCV 2.x API; cv2.boxPoints in 3.x+
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x, y, w, h = cv2.boundingRect(contour)  # boundingRect returns x, y, width, height
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 255, -1)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 0, 4)
    return np.minimum(c_im, ary)
def density_slice(rast, rel=np.less_equal, threshold=1000, nodata=-9999):
    '''
    Returns a density slice from a given raster. Arguments:
        rast        A gdal.Dataset or a NumPy array
        rel         A NumPy logic function; defaults to np.less_equal
        threshold   An integer number
        nodata      The NoData value; defaults to -9999.
    '''
    # Can accept either a gdal.Dataset or numpy.array instance
    if not isinstance(rast, np.ndarray):
        rastr = rast.ReadAsArray()
    else:
        rastr = rast.copy()
    if (len(rastr.shape) > 2 and min(rastr.shape) > 1):
        raise ValueError('Expected a single-band raster array')
    # Use rastr.shape (not rast.shape), since rast may be a gdal.Dataset
    return np.logical_and(
        rel(rastr, np.ones(rastr.shape) * threshold),
        np.not_equal(rastr, np.ones(rastr.shape) * nodata)).astype(np.int0)
def shapeFiltering(img):
    contours = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
    if len(contours) == 0:
        return "yoopsie"
    #else:
    #    print contours
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contours, -1, (255, 255, 255))
    cv2.imshow("imagiae", blank_image)
    cv2.waitKey()"""
    good_shape = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        """rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        w = """
        #if h == 0:
        #    continue
        ratio = w / h
        ratio_grade = ratio / (TMw / TMh)
        if 0.2 < ratio_grade < 1.8:
            good_shape.append(c)
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, good_shape, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    return good_shape
def findCorners(contour):
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contour, -1, (255, 255, 255))
    rows,cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2,rows/2),-45,0.5)
    dst = cv2.warpAffine(blank_image,M,(cols,rows))
    cv2.imshow("rotatio", dst)
    cv2.waitKey()"""
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print(height_px_1, height_px_2)
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2
    return close_height_px, far_height_px
def findCorners(contour):
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = numpy.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print(height_px_1, height_px_2)
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2
    return close_height_px, far_height_px
def update(roi):
    img1b.setImage(roi.getArrayRegion(arr, img1a), levels=(0, arr.max()))
    img1c.setImage(np.int0(r.getMask(arr.shape)))

# cell.sigRegionChanged.connect(update)
# update(cell)
def get_bounding_rect(contour):
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    return np.int0(box)
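A hypothetical call, assuming a thresholded binary image and the usual contour pipeline (the variable names are illustrative):

# OpenCV 4.x: findContours returns (contours, hierarchy)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
largest = max(contours, key=cv2.contourArea)
cv2.drawContours(img, [get_bounding_rect(largest)], 0, (0, 255, 0), 2)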
def shi_tomasi(gray):
    # image: the input grayscale image
    # maxCorners: maximum number of corners to return
    # qualityLevel: minimum accepted quality of image corners
    # minDistance: minimum possible Euclidean distance between returned corners
    corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)
    # corners come back as floats like [[311., 250.]]; convert to ints
    corners = np.int0(corners)
    return corners
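A hypothetical usage sketch that marks each detected corner (img is an assumed BGR input; corners have shape (N, 1, 2), hence the reshape):

img = cv2.imread('board.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for x, y in shi_tomasi(gray).reshape(-1, 2):
    cv2.circle(img, (int(x), int(y)), 3, (0, 0, 255), -1)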
def calculateFrame(self, cap):
    data = self.getDataPoints()
    #targetCascade = cv2.CascadeClassifier(cascPath)
    frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    lower_bound = np.array([float(data['HMIN']), float(data['SMIN']), float(data['VMIN'])])
    upper_bound = np.array([float(data['HMAX']), float(data['SMAX']), float(data['VMAX'])])
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_bound, upper_bound)
    largest_area = 0
    xCenter = -1
    yCenter = -1
    targetRect = None
    ret, thresh = cv2.threshold(mask, 200, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 1:
        areas = [cv2.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
        cnt = contours[max_index]
        rect = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        xCenter = (box[0][0] + box[1][0] + box[2][0] + box[3][0]) / 4
        yCenter = (box[0][1] + box[1][1] + box[2][1] + box[3][1]) / 4
        cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
    output = {}
    distance = 0.0025396523 * yCenter**2 + 0.1000098497 * yCenter + 46.8824851568
    theta = math.atan2(xCenter - 160, distance)
    output_dict = {"xCenter": xCenter, "yCenter": yCenter, "theta": theta, "distance": distance}
    output = json.dumps(output_dict)
    return frame, output, True, mask
def cfmask(mask, mask_values=(1, 2, 3, 4, 255), nodata=-9999):
    '''
    Returns a binary mask according to the CFMask algorithm results for the
    image; mask has True for water, cloud, shadow, and snow (if any) and False
    everywhere else. More information can be found:
        https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
    Landsat 4-7 Pre-Collection pixel_qa values to be masked:
        mask_values = (1, 2, 3, 4)
    Landsat 4-7 Collection 1 pixel_qa values to be masked (for "Medium" confidence):
        mask_values = (1, 68, 72, 80, 112, 132, 136, 144, 160, 176, 224)
    Landsat 8 Collection 1 pixel_qa values to be masked (for "Medium" confidence):
        mask_values = (1, 324, 328, 386, 388, 392, 400, 416, 432, 480, 832, 836, 840, 848, 864, 880, 900, 904, 912, 928, 944, 992, 1024)
    Arguments:
        mask        A gdal.Dataset or a NumPy array
        mask_values The values in the mask that should be masked out
        nodata      The NoData value; defaults to -9999.
    '''
    if not isinstance(mask, np.ndarray):
        maskr = mask.ReadAsArray()
    else:
        maskr = mask.copy()
    # Mask according to bit-packing described here:
    # https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
    maskr = np.in1d(maskr.reshape((maskr.shape[0] * maskr.shape[1])), mask_values)\
        .reshape((1, maskr.shape[0], maskr.shape[1])).astype(np.int0)
    return maskr
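A hypothetical call, assuming a Landsat 7 Collection 1 pixel_qa band opened with GDAL (the file name is illustrative):

from osgeo import gdal

qa = gdal.Open('LE07_L1TP_pixel_qa.tif')
cloud_mask = cfmask(qa, mask_values=(1, 68, 72, 80, 112, 132, 136, 144, 160, 176, 224))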
def getTargetBox(target):
    minRect = cv2.minAreaRect(target)
    box = cv2.cv.BoxPoints(minRect)
    #box = np.int0(box)  # convert points to ints
    return box
def validate_contour(contour, img, aspect_ratio_range, area_range):
    rect = cv2.minAreaRect(contour)
    img_width = img.shape[1]
    img_height = img.shape[0]
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    X = rect[0][0]
    Y = rect[0][1]
    angle = rect[2]
    width = rect[1][0]
    height = rect[1][1]
    angle = (angle + 180) if width < height else (angle + 90)
    output = False
    if (width > 0 and height > 0) and ((width < img_width / 2.0) and (height < img_width / 2.0)):
        aspect_ratio = float(width) / height if width > height else float(height) / width
        if (aspect_ratio >= aspect_ratio_range[0] and aspect_ratio <= aspect_ratio_range[1]):
            if ((height * width > area_range[0]) and (height * width < area_range[1])):
                box_copy = list(box)
                point = box_copy[0]
                del(box_copy[0])
                dists = [((p[0] - point[0])**2 + (p[1] - point[1])**2) for p in box_copy]
                sorted_dists = sorted(dists)
                opposite_point = box_copy[dists.index(sorted_dists[1])]
                tmp_angle = 90
                if abs(point[0] - opposite_point[0]) > 0:
                    tmp_angle = abs(float(point[1] - opposite_point[1])) / abs(point[0] - opposite_point[0])
                    tmp_angle = rad_to_deg(math.atan(tmp_angle))
                if tmp_angle <= 45:
                    output = True
    return output
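validate_contour relies on a rad_to_deg helper that the excerpt omits; a plausible definition, plus a hypothetical call with illustrative license-plate-style thresholds:

import math

def rad_to_deg(rad):
    # assumed helper: radians to degrees
    return rad * 180.0 / math.pi

keep = validate_contour(cnt, img, aspect_ratio_range=(3, 6), area_range=(500, 15000))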
def bboxes_to_xys(bboxes, image_shape):
    """Convert Seglink bboxes to xys, i.e., eight points
    The `image_shape` is used to make sure all points returned are valid, i.e., within image area
    """
    if len(bboxes) == 0:
        return []
    assert np.ndim(bboxes) == 2 and np.shape(bboxes)[-1] == 5, 'invalid `bboxes` param with shape = ' + str(np.shape(bboxes))
    h, w = image_shape[0:2]

    def get_valid_x(x):
        if x < 0:
            return 0
        if x >= w:
            return w - 1
        return x

    def get_valid_y(y):
        if y < 0:
            return 0
        if y >= h:
            return h - 1
        return y

    xys = np.zeros((len(bboxes), 8))
    for bbox_idx, bbox in enumerate(bboxes):
        bbox = ((bbox[0], bbox[1]), (bbox[2], bbox[3]), bbox[4])
        points = cv2.cv.BoxPoints(bbox)
        points = np.int0(points)
        for i_xy, (x, y) in enumerate(points):
            x = get_valid_x(x)
            y = get_valid_y(y)
            points[i_xy, :] = [x, y]
        points = np.reshape(points, -1)
        xys[bbox_idx, :] = points
    return xys
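A hypothetical call with a single rotated box in (cx, cy, w, h, angle) form on a 480x640 image; this layout is inferred from how the function unpacks each bbox:

bboxes = np.array([[100.0, 80.0, 60.0, 20.0, 15.0]])
xys = bboxes_to_xys(bboxes, (480, 640))
print(xys.shape)  # (1, 8): four clipped (x, y) corner pairs per box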
def __init__(self, fname=None, include_orth=True, include_pols=True):
    if fname is None:
        # fname is the name of the file to read in the design matrix
        self.design = np.zeros([0, 0])
        self.n_col = 0
        # number of columns (conditions) in the design matrix
        self.column_types = np.ones(0)
        self.n_basis = 0
        self.n_stim = 0
        self.n_orth = 0
        self.StimLabels = []
    else:
        # isAFNI = re.match(r'.+[.](1D|1d|txt)$', fname)
        filename, ext = os.path.splitext(fname)
        # We assume all AFNI 1D files have extension of 1D or 1d or txt
        if ext in ['.1D', '.1d', '.txt']:
            self.read_afni(fname=fname)
    self.include_orth = include_orth
    self.include_pols = include_pols
    # The two flags above dictate whether columns corresponding to
    # baseline drift modeled by polynomial functions of time and
    # columns corresponding to other orthogonal signals (usually motion)
    # are included in nuisance regressors.
    self.cols_task = np.where(self.column_types == 1)[0]
    self.design_task = self.design[:, self.cols_task]
    if np.ndim(self.design_task) == 1:
        self.design_task = self.design_task[:, None]
    # part of the design matrix related to task conditions.
    self.n_TR = np.size(self.design_task, axis=0)
    self.cols_nuisance = np.array([])
    if self.include_orth:
        self.cols_nuisance = np.int0(
            np.sort(np.append(self.cols_nuisance,
                              np.where(self.column_types == 0)[0])))
    if self.include_pols:
        self.cols_nuisance = np.int0(
            np.sort(np.append(self.cols_nuisance,
                              np.where(self.column_types == -1)[0])))
    if np.size(self.cols_nuisance) > 0:
        self.reg_nuisance = self.design[:, self.cols_nuisance]
        if np.ndim(self.reg_nuisance) == 1:
            self.reg_nuisance = self.reg_nuisance[:, None]
    else:
        self.reg_nuisance = None
    # Nuisance regressors for motion, baseline, etc.
def _find_array_button_thing(self):
    """ Find the array button on the solar array box.
    This uses color to determine if we have a choke. """
    lower = np.array([0, 0, 60], dtype="uint8")
    upper = np.array([20, 20, 255], dtype="uint8")
    mask = cv2.inRange(self.img, lower, upper)
    blurred = cv2.GaussianBlur(mask, (5, 5), 0)
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if is_cv2() else contours[1]
    debug_img = None
    if self.debug:
        debug_img = self.img.copy()
    button_box = None
    for c in contours:
        box = cv2.boundingRect(c)
        if button_box is None:
            button_box = box
        else:
            button_box = self._union_box(deepcopy(button_box), box)
    if button_box is None:
        return
    top, bottom, left, right, center = self.find_dimensions(np.int0(np.array(self._bound_to_boxpoints(button_box))))
    if top is None or left is None or center is None:
        return None
    height = self.find_distance(top, bottom)
    width = self.find_distance(left, right)
    if self.debug:
        for c in contours:
            cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
        cv2.circle(debug_img, top, 5, (255, 255, 0))
        cv2.circle(debug_img, bottom, 5, (255, 255, 0))
        cv2.circle(debug_img, left, 5, (255, 255, 0))
        cv2.circle(debug_img, right, 5, (255, 255, 0))
        cv2.rectangle(debug_img, (button_box[0], button_box[1]),
                      (button_box[0] + button_box[2], button_box[1] + button_box[3]), (128, 0, 128), 2)
        #cv2.circle(debug_img, center, 5, (255, 255, 0))
        cv2.imshow("button picture", debug_img)
        cv2.setMouseCallback("button picture", self.handle_mouse)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    self.array_button = Thing(height, width, center, None)
    self.array_button.set_array_button()
    self.array_button.computed_center = self.compute_center(left, right, top, bottom)
    self.things.append(self.array_button)
def _find_a_thing(self, c, min_height, max_height, min_width, max_width, max_distance, debug_img=None):
    rect = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(rect) if is_cv2() else cv2.boxPoints(rect)
    top, bottom, left, right, center = self.find_dimensions(np.int0(np.array(box)))
    if top is None or left is None or center is None:
        return None
    vertical = self.find_distance(top, bottom)
    horizontal = self.find_distance(left, right)
    away = self.find_distance(center, None)
    if vertical > horizontal:
        height = vertical
        width = horizontal
        flipped = False
    else:
        height = horizontal
        width = vertical
        flipped = True
    if height < min_height or height > max_height:
        return None
    if width < min_width or width > max_width:  # was compared against max_height, an apparent typo
        return None
    if away > max_distance:
        return None
    # This page was helpful in understanding angle
    # https://namkeenman.wordpress.com/2015/12/18/open-cv-determine-angle-of-rotatedrect-minarearect/
    angle = rect[2]
    if rect[1][0] < rect[1][1]:
        angle -= 90.0
    if debug_img is not None:
        x, y, w, h = cv2.boundingRect(c)
        cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
        cv2.drawContours(debug_img, [np.int0(np.array(box))], -1, (0, 0, 255), 2)
        cv2.rectangle(debug_img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.circle(debug_img, top, 5, (255, 255, 0))
        cv2.circle(debug_img, bottom, 5, (255, 255, 0))
        cv2.circle(debug_img, left, 5, (255, 255, 0))
        cv2.circle(debug_img, right, 5, (255, 255, 0))
        cv2.circle(debug_img, center, 5, (255, 255, 0))
    return Thing(height, width, center, angle)
def get_contours(orig_image):
    """
    Get edge points (hopefully corners) from the given opencv image (called
    contours in opencv)

    Parameters:
        :param: `orig_image` - the thresholded image from which to find contours
    """
    new_image = numpy.copy(orig_image)
    # cv2.imshow("Vision", new_image)
    # cv2.waitKey(1000)
    new_image, contours, hierarchy = cv2.findContours(new_image,
                                                      cv2.RETR_EXTERNAL,
                                                      cv2.CHAIN_APPROX_SIMPLE)
    # print(len(contours))
    # print(len(contours[0]))
    # print(len(contours[0][0]))
    # print(len(contours[0][0][0]))
    largest_contour = 0
    most_matching = 0
    min_score = 0
    max_area = 0
    if len(contours) > 1:
        print("Length of contours:", len(contours))
        max_area = cv2.contourArea(contours[0])
        min_score = average_goal_matching(contours[0])
        for i in range(1, len(contours)):
            # print(contours[i])
            current_score = average_goal_matching(contours[i])
            current_area = cv2.contourArea(contours[i])
            if current_area > max_area:
                max_area = current_area
                largest_contour = i
            if current_score < min_score and current_score != 0 and current_area > 300 and current_area < 1500:
                min_score = current_score
                most_matching = i
    elif len(contours) == 0:
        raise GoalNotFoundException("Goal not found!")
    if min_score >= 9999999999999999:
        raise GoalNotFoundException("Goal not found!")
    print("largest_contour:", largest_contour)
    print("Area:", max_area)
    # print("largest_contour:", largest_contour)
    print("Most matching:", most_matching)
    print("Score:", min_score)
    print("Area of most matching:", cv2.contourArea(contours[most_matching]))
    rect = cv2.minAreaRect(contours[most_matching])
    box = cv2.boxPoints(rect)
    box = numpy.int0(box)
    # print(box)
    return numpy.array(contours[most_matching]), box
def detectAllVertices(self, testImg):
    # Detecting vertices on the newly constructed board
    self.gray = cv2.cvtColor(testImg, cv2.COLOR_BGR2GRAY)
    tempVertices = cv2.goodFeaturesToTrack(self.gray, int(self.FINAL_VERTICES_COUNT), 0.01, 10)
    tempVertices = np.int0(tempVertices)
    newVertices = []
    for i in tempVertices:
        x, y = i.ravel()
        newVertices.append((x, y))
    # Matrix to store coordinates of vertices on the board
    self.ALL_VERTICES = [[(0, 0) for x in range(self.FACTOR + 2)] for x in range(self.FACTOR + 2)]
    # Filling the matrix
    self.ALL_VERTICES[0][0] = self.CORNERS[1]
    for i in range(0, self.FACTOR):
        for j in range(0, self.FACTOR):
            predicted_x = self.ALL_VERTICES[i][j][0] + int(
                (self.OUTER_VERTICES[2][self.FACTOR - i][0] - self.OUTER_VERTICES[0][i][0]) / 8)
            predicted_y = self.ALL_VERTICES[i][j][1] + int(
                (self.OUTER_VERTICES[3][self.FACTOR - i][1] - self.OUTER_VERTICES[1][i][1]) / 8)
            minn_dist = self.INT_MAX
            for point in newVertices:
                this_dist = Geometry.getPointsDistance(point, (predicted_x, self.ALL_VERTICES[i][j][1]))
                if this_dist < minn_dist:
                    self.ALL_VERTICES[i][j + 1] = point
                    minn_dist = this_dist
            minn_dist = self.INT_MAX
            for point in newVertices:
                this_dist = Geometry.getPointsDistance(point, (self.ALL_VERTICES[i][j][0], predicted_y))
                if this_dist < minn_dist:
                    self.ALL_VERTICES[i + 1][j] = point
                    minn_dist = this_dist
    self.ALL_VERTICES[self.FACTOR][self.FACTOR] = self.CORNERS[3]
def image_callback(self, msg):
    # convert ROS image to OpenCV image
    try:
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    except CvBridgeError as e:
        print(e)
    # create hsv image of scene
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # find pink objects in the image
    lower_pink = numpy.array([139, 0, 240], numpy.uint8)
    upper_pink = numpy.array([159, 121, 255], numpy.uint8)
    mask = cv2.inRange(hsv, lower_pink, upper_pink)
    # dilate and erode with kernel size 11x11
    cv2.morphologyEx(mask, cv2.MORPH_CLOSE, numpy.ones((11, 11)))
    # find all of the contours in the mask image
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    self.contourLength = len(contours)
    # Check for at least one target found
    if self.contourLength < 1:
        print("No target found")
    else:  # target found
        # Loop through all of the contours, and get their areas
        area = [0.0] * len(contours)
        for i in range(self.contourLength):
            area[i] = cv2.contourArea(contours[i])
        # Target: the largest "pink" object
        target_image = contours[area.index(max(area))]
        # Using moments find the center of the object and draw a red outline around the object
        target_m = cv2.moments(target_image)
        self.target_u = int(target_m['m10'] / target_m['m00'])
        self.target_v = int(target_m['m01'] / target_m['m00'])
        points = cv2.minAreaRect(target_image)
        box = cv2.cv.BoxPoints(points)
        box = numpy.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        rospy.loginfo("Center of target is x at %d and y at %d", int(self.target_u), int(self.target_v))
        self.target_found = True  # set flag for depth_callback processing
    # show image with target outlined with a red rectangle
    cv2.imshow("Target", image)
    cv2.waitKey(3)

# This callback function handles processing Kinect depth image, looking for the depth value
# at the location of the center of the pink target.
def binary_mask(rast, mask, nodata=-9999, invert=False):
    '''
    Applies an arbitrary, binary mask (data in [0,1]) where pixels with
    a value of 1 are pixels to be masked out. Arguments:
        rast    A gdal.Dataset or a NumPy array
        mask    A gdal.Dataset or a NumPy array
        nodata  The NoData value; defaults to -9999.
        invert  Invert the mask? (transpose meaning of 0 and 1); defaults to False.
    '''
    # Can accept either a gdal.Dataset or numpy.array instance
    if not isinstance(rast, np.ndarray):
        rastr = rast.ReadAsArray()
    else:
        rastr = rast.copy()
    if not isinstance(mask, np.ndarray):
        maskr = mask.ReadAsArray()
    else:
        maskr = mask.copy()
    if not np.alltrue(np.equal(rastr.shape[-2:], maskr.shape[-2:])):
        raise ValueError('Raster and mask do not have the same shape')
    # Convert Boolean arrays to ones and zeros
    if maskr.dtype == bool:
        maskr = maskr.astype(np.int0)
    # Transform into a "1-band" array and apply the mask
    if maskr.shape != rastr.shape:
        maskr = maskr.reshape((1, maskr.shape[-2], maskr.shape[-1]))\
            .repeat(rastr.shape[0], axis=0)  # Copy the mask across the "bands"
    # TODO Compare to place(), e.g.,
    # np.place(rastr, mask.repeat(rastr.shape[0], axis=0), (nodata,))
    # Mask out areas that match the mask (==1)
    if invert:
        rastr[maskr < 1] = nodata
    else:
        rastr[maskr > 0] = nodata
    return rastr
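A minimal sketch exercising binary_mask with plain NumPy arrays (shapes and values are illustrative):

rast = np.random.rand(6, 4, 4)      # 6 "bands" of a 4x4 raster
mask = np.zeros((4, 4), dtype=bool)
mask[0, 0] = True                   # flag one pixel for masking
out = binary_mask(rast, mask)       # that pixel becomes -9999 in every band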
def filterContoursFancy(contours, image=None):
    if len(contours) == 0:
        return []
    numContours = len(contours)
    areas = np.array([cv2.contourArea(contour) for contour in contours])
    boundingRects = [cv2.boundingRect(contour) for contour in contours]
    widths, heights, positions = boundingInfo(boundingRects)
    rotatedRects = [cv2.minAreaRect(contour) for contour in contours]
    if config.withOpenCV3:
        rotatedBoxes = [np.int0(cv2.boxPoints(rect)) for rect in rotatedRects]
    else:
        rotatedBoxes = [np.int0(cv2.cv.BoxPoints(rect)) for rect in rotatedRects]
    rotatedAreas = [cv2.contourArea(box) for box in rotatedBoxes]
    sizeScores = [size(area) for area in areas]
    ratioScores = ratios(widths, heights)
    rotationScores = [rotation(rect) for rect in rotatedRects]
    rectangularScores = [distToPolygon(contour, poly) for contour, poly in zip(contours, rotatedBoxes)]
    areaScores = polygonAreaDiff(areas, rotatedAreas)
    quadScores = [Quadrify(contour) for contour in contours]
    rectangularScores = np.divide(rectangularScores, widths)
    scores = np.array([sizeScores, ratioScores, rotationScores, rectangularScores, areaScores, quadScores])
    contourScores = np.dot(weights, scores)
    correctInds, incorrectInds = sortedInds(contourScores)
    correctContours = np.array(contours)[correctInds]
    if config.extra_debug:
        print("size, ratio, rotation, rectangular, area, quad")
        print("Weights:", weights)
        print("Scores: ", contourScores)
        print(np.average(scores, axis=1))
        if len(incorrectInds) != 0:
            print("AVG, WORST", test(scores, correctInds, incorrectInds))
        for i in range(numContours):
            print("CONTOUR " + str(i))
            print(np.multiply(scores[:, i], weights))  # newWeights
            print(contourScores[i])
            if image is not None:  # `if image:` is ambiguous for NumPy arrays
                img = copy.deepcopy(image)
                Printing.drawImage(img, contours[:i] + contours[i+1:], contours[i], False)
                Printing.display(img, "contour " + str(i), doResize=True)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
    return correctContours
def filterContoursAutocalibrate(contours, image=None):
    if len(contours) == 0:
        return []
    numContours = len(contours)
    areas = np.array([cv2.contourArea(contour) for contour in contours])
    boundingRects = [cv2.boundingRect(contour) for contour in contours]
    widths, heights, positions = boundingInfo(boundingRects)
    rotatedRects = [cv2.minAreaRect(contour) for contour in contours]
    if config.withOpenCV3:
        rotatedBoxes = [np.int0(cv2.boxPoints(rect)) for rect in rotatedRects]
    else:
        rotatedBoxes = [np.int0(cv2.cv.BoxPoints(rect)) for rect in rotatedRects]
    rotatedAreas = [cv2.contourArea(box) for box in rotatedBoxes]
    sizeScores = [size(area) for area in areas]
    ratioScores = ratios(widths, heights)
    rotationScores = [rotation(rect) for rect in rotatedRects]
    rectangularScores = [distToPolygon(contour, poly) for contour, poly in zip(contours, rotatedBoxes)]
    areaScores = polygonAreaDiff(areas, rotatedAreas)
    quadScores = [Quadrify(contour) for contour in contours]
    rectangularScores = np.divide(rectangularScores, widths)
    scores = np.array([sizeScores, ratioScores, rotationScores, rectangularScores, areaScores, quadScores])
    contourScores = np.dot(weights, scores)
    correctInds, incorrectInds = sortedInds(contourScores)
    correctContours = np.array(contours)[correctInds]
    averageScore = 0
    for i in range(numContours):
        averageScore += sizeScores[i]
        averageScore += ratioScores[i]
        averageScore += rotationScores[i]
        averageScore += rectangularScores[i]
        averageScore += areaScores[i]
        averageScore += quadScores[i]
    averageScore /= numContours
    return averageScore
def image_callback(self, msg):
    # convert ROS image to OpenCV image
    try:
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    except CvBridgeError as e:
        print(e)
    # create hsv image of scene
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # find pink objects in the image
    lower_pink = numpy.array([139, 0, 240], numpy.uint8)
    upper_pink = numpy.array([159, 121, 255], numpy.uint8)
    mask = cv2.inRange(hsv, lower_pink, upper_pink)
    # dilate and erode with kernel size 11x11
    cv2.morphologyEx(mask, cv2.MORPH_CLOSE, numpy.ones((11, 11)))
    # find all of the contours in the mask image
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    self.contourLength = len(contours)
    # Check for at least one target found
    if self.contourLength < 1:
        print("No target found")
    else:  # target found
        # Loop through all of the contours, and get their areas
        area = [0.0] * len(contours)
        for i in range(self.contourLength):
            area[i] = cv2.contourArea(contours[i])
        # Target: the largest "pink" object
        target_image = contours[area.index(max(area))]
        # Using moments find the center of the object and draw a red outline around the object
        target_m = cv2.moments(target_image)
        self.target_u = int(target_m['m10'] / target_m['m00'])
        self.target_v = int(target_m['m01'] / target_m['m00'])
        points = cv2.minAreaRect(target_image)
        box = cv2.cv.BoxPoints(points)
        box = numpy.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        rospy.loginfo("Center of target is x at %d and y at %d", int(self.target_u), int(self.target_v))
        self.target_found = True  # set flag for depth_callback processing
    # show image with target outlined with a red rectangle
    cv2.imshow("Target", image)
    cv2.waitKey(3)

# This callback function handles processing Kinect depth image, looking for the depth value
# at the location of the center of the pink target.
def detect_barcode(imageval):
    # load the image and convert it to grayscale
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)
    # compute the Scharr gradient magnitude representation of the images
    # in both the x and y direction
    gradX = cv2.Sobel(gray, ddepth=cv2.cv.CV_32F, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=cv2.cv.CV_32F, dx=0, dy=1, ksize=-1)
    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)
    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
    # construct a closing kernel and apply it to the thresholded image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)
    # find the contours in the thresholded image, then sort the contours
    # by their area, keeping only the largest one
    (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
    # compute the rotated bounding box of the largest contour
    rect = cv2.minAreaRect(c)
    box = np.int0(cv2.cv.BoxPoints(rect))
    # draw a bounding box around the detected barcode and display the
    # image
    cv2.drawContours(img_data_ndarray, [box], -1, (0, 255, 0), 3)
    # cv2.imshow("Image", image)
    #cv2.imwrite("uploads/output-" + datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ".jpg", image)
    # cv2.waitKey(0)
    #outputfile = "uploads/output-" + time.strftime("%H:%M:%S") + ".jpg"
    outputfile = "uploads/output.jpg"
    cv2.imwrite(outputfile, img_data_ndarray)