The following code examples, collected from open-source Python projects, illustrate how to use skimage.exposure.rescale_intensity().
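Before the project examples, here is a minimal sketch of the core keyword arguments. The values follow the behavior documented for skimage; exact rounding of the cast back to uint8 may vary slightly between versions.

import numpy as np
from skimage import exposure

image = np.array([51, 102, 153], dtype=np.uint8)

# With the defaults, the image's own min/max are stretched to the
# full range of its dtype: uint8 -> (0, 255).
print(exposure.rescale_intensity(image))                    # [  0 127 255]

# in_range fixes the input range; values outside it are clipped
# before stretching.
print(exposure.rescale_intensity(image, in_range=(0, 102))) # [127 255 255]

# out_range sets the target range instead of the dtype's full range.
print(exposure.rescale_intensity(image, out_range=(0, 127)))  # [  0  63 127]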
def rgb2illumination_invariant(img, alpha, hist_eq=False):
    """
    An implementation of the illumination-invariant color space
    published by Maddern et al. (2014)
    http://www.robots.ox.ac.uk/~mobile/Papers/2014ICRA_maddern.pdf

    :param img: input RGB image
    :param alpha: camera parameter
    :param hist_eq: if True, apply histogram equalization to the result
    :return: the illumination-invariant image
    """
    ii_img = 0.5 + np.log(img[:, :, 1] + 1e-8) - \
        alpha * np.log(img[:, :, 2] + 1e-8) - \
        (1 - alpha) * np.log(img[:, :, 0] + 1e-8)

    # ii_img = exposure.rescale_intensity(ii_img, out_range=(0, 1))
    if hist_eq:
        ii_img = exposure.equalize_hist(ii_img)

    print(np.max(ii_img))
    print(np.min(ii_img))

    return ii_img
def stretchHistogram(inData):
    """Stretch the histogram of an array to the value range of uint8
    using skimage.exposure.rescale_intensity.

    Parameters
    ----------
    inData : numpy array
        Dimensions [band, xdim, ydim].

    Returns
    -------
    numpy array
        Array with each band rescaled to the range (0, 255).
    """
    nbands = inData.shape[0]
    outData = inData.copy()
    for b in range(nbands):
        outData[b, :, :] = rescale_intensity(inData[b, :, :], out_range=(0, 255))
    return outData
def scale_rgb(layers, min_max, lidx):

    layers_c = np.empty(layers.shape, dtype='float32')

    # Rescale and blur.
    for li in range(0, 3):

        layer = layers[li]

        layer = np.float32(rescale_intensity(layer,
                                             in_range=(min_max[li][0],
                                                       min_max[li][1]),
                                             out_range=(0, 1)))

        layers_c[lidx[li]] = rescale_intensity(cv2.GaussianBlur(layer,
                                                                ksize=(3, 3),
                                                                sigmaX=3),
                                               in_range=(0, 1),
                                               out_range=(-1, 1))

    return layers_c
def save_image_with_clusters(x, clusters, filename, shape=(10, 10),
                             scale_each=False, transpose=False):
    """Single image; each row is a cluster."""
    makedirs(filename)
    n = x.shape[0]
    images = np.zeros_like(x)
    curr_len = 0
    for i in range(10):
        images_i = x[clusters == i, :]
        n_i = images_i.shape[0]
        images[curr_len:curr_len + n_i, :] = images_i
        curr_len += n_i
    x = images
    if transpose:
        x = x.transpose(0, 2, 3, 1)
    if scale_each is True:
        for i in range(n):
            x[i] = rescale_intensity(x[i], out_range=(0, 1))
    n_channels = x.shape[3]
    x = img_as_ubyte(x)
    r, c = shape
    if r * c < n:
        print('Shape too small to contain all images')
    h, w = x.shape[1:3]
    ret = np.zeros((h * r, w * c, n_channels), dtype='uint8')
    for i in range(r):
        for j in range(c):
            if i * c + j < n:
                ret[i * h:(i + 1) * h, j * w:(j + 1) * w, :] = x[i * c + j]
    ret = ret.squeeze()
    io.imsave(filename, ret)
def random_contrast(weight=lambda: np.random.rand() * 0.3 + 0.7):
    def call(x):
        w = weight()
        return x * w + (1 - w) * exposure.rescale_intensity(x)
    return call
def save_image_collections(x, filename, shape=(10, 10), scale_each=False,
                           transpose=False):
    """
    :param x: numpy array
        Input image collection, with shape
        (number_of_images, rows, columns, channels) or
        (number_of_images, channels, rows, columns).
    :param filename: str
        Path of the output image.
    :param shape: tuple
        The (rows, columns) grid shape of the final big image.
    :param scale_each: bool
        If True, rescale intensity for each image.
    :param transpose: bool
        If True, transpose x to (number_of_images, rows, columns, channels),
        i.e., put channels last.

    The assembled `uint8` image is written to `filename`.
    """
    makedirs(filename)
    n = x.shape[0]
    if transpose:
        x = x.transpose(0, 2, 3, 1)
    if scale_each is True:
        for i in range(n):
            x[i] = rescale_intensity(x[i], out_range=(0, 1))
    n_channels = x.shape[3]
    x = img_as_ubyte(x)
    r, c = shape
    if r * c < n:
        print('Shape too small to contain all images')
    h, w = x.shape[1:3]
    ret = np.zeros((h * r, w * c, n_channels), dtype='uint8')
    for i in range(r):
        for j in range(c):
            if i * c + j < n:
                ret[i * h:(i + 1) * h, j * w:(j + 1) * w, :] = x[i * c + j]
    ret = ret.squeeze()
    io.imsave(filename, ret)
def scaling(image, method="stretching"): """ Change the image dynamic. Parameters ---------- image: Image the image to be transformed. method: str, default 'stretching' the normalization method: 'stretching', 'equalization' or 'adaptive'. Returns ------- normalize_image: Image the normalized image. """ # Contrast stretching if method == "stretching": p2, p98 = np.percentile(image.data, (2, 98)) norm_data = exposure.rescale_intensity(image.data, in_range=(p2, p98)) # Equalization elif method == "equalization": norm_data = exposure.equalize_hist(image.data) # Adaptive Equalization elif method == "adaptive": norm_data = exposure.equalize_adapthist(image.data, clip_limit=0.03) # Unknown method else: raise ValueError("Unknown normalization '{0}'.".format(method)) normalize_image = pisap.Image(data=norm_data) return normalize_image
def hist_stretch(im, percentiles=(1, 99)):
    im = im * 100000
    # im = np.array(im, np.int64)
    # Use the intensity values at the given percentiles (not the raw
    # percentile ranks) as the input range.
    p_low, p_high = np.percentile(im, percentiles)
    return exposure.rescale_intensity(im, in_range=(p_low, p_high))
def image_adjust(image):
    image = cv2.cvtColor(image, COLOR_SPACE)
    x, y, z = cv2.split(image)
    if INTENSITY_COMPONENT == 1:
        x = exposure.rescale_intensity(x)
    elif INTENSITY_COMPONENT == 2:
        y = exposure.rescale_intensity(y)
    elif INTENSITY_COMPONENT == 3:
        z = exposure.rescale_intensity(z)
    return cv2.cvtColor(cv2.merge((x, y, z)), INVERSE_COLOR_SPACE)
def image_adjust(image):
    return exposure.rescale_intensity(image)
def histogram_equalization(image, tile):
    if tile < 0:
        tile = 0
    elif tile > 100:
        tile = 100
    tile = int(tile / 10)
    img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(2 ** tile, 2 ** tile))
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_out = cv2.cvtColor(img_yuv, cv2.COLOR_YCrCb2BGR)
    img = exposure.rescale_intensity(img_out)
    return img
def apply_p2_98(data):
    """Normalize an image by contrast stretching to its 2nd and 98th percentiles."""
    p2 = np.percentile(data, 2)
    p98 = np.percentile(data, 98)
    data = exposure.rescale_intensity(data, in_range=(p2, p98))
    return data
def generate_hog_features(filename):
    input_image = io.imread(filename)
    gray_image = color.rgb2gray(input_image)
    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    return hog_image_rescaled
def save_hog_image_comparison(filename):
    input_image = io.imread(filename)
    gray_image = color.rgb2gray(input_image)
    out_filename = "hog/" + filename
    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    # io.imsave("hog/" + filename, hog_image)

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
    ax1.axis('off')
    ax1.imshow(gray_image, cmap=plt.cm.gray)
    ax1.set_title('Input image')
    ax1.set_adjustable('box-forced')

    # Rescale histogram for better display
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    ax2.axis('off')
    ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
    ax2.set_title('Histogram of Oriented Gradients')
    ax2.set_adjustable('box-forced')
    plt.savefig(out_filename)
    plt.close()

    return hog_image
def generate_hog_features(image_arr):
    fd = hog(image_arr, orientations=8, pixels_per_cell=(16, 16),
             cells_per_block=(2, 2), visualise=False)
    # hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    return fd
def image_to_hog_features(input_image):
    gray_image = color.rgb2gray(input_image)
    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    return hog_image_rescaled
def segmenter_data_transform(imb, rotate=None, normalize_pctwise=False):
    if isinstance(imb, tuple) and len(imb) == 2:
        imgs, labels = imb
    else:
        imgs = imb
    # Rotate the image if training.
    if rotate is not None:
        for i in range(imgs.shape[0]):
            degrees = float(np.random.randint(rotate[0], rotate[1])) if \
                isinstance(rotate, tuple) else rotate
            imgs[i, 0] = scipy.misc.imrotate(imgs[i, 0], degrees, interp='bilinear')
            if isinstance(imb, tuple):
                labels[i, 0] = scipy.misc.imrotate(labels[i, 0], degrees, interp='bilinear')
    # Assume the images are square.
    sz = c.fcn_img_size
    x, y = np.random.randint(0, imgs.shape[2] - sz, 2) if imgs.shape[2] > sz else (0, 0)
    imgs = nn.utils.floatX(imgs[:, :, x:x + sz, y:y + sz]) / 255.
    if not normalize_pctwise:
        pad = imgs.shape[2] // 5
        cut = imgs[:, 0, pad:-pad, pad:-pad]
        mu = cut.mean(axis=(1, 2)).reshape(imgs.shape[0], 1, 1, 1)
        sigma = cut.std(axis=(1, 2)).reshape(imgs.shape[0], 1, 1, 1)
        imgs = (imgs - mu) / sigma
        imgs = np.minimum(3, np.maximum(-3, imgs))
    else:
        pclow, pchigh = normalize_pctwise if isinstance(normalize_pctwise, tuple) else (20, 70)
        for i in range(imgs.shape[0]):
            pl, ph = np.percentile(imgs[i], (pclow, pchigh))
            imgs[i] = exposure.rescale_intensity(imgs[i], in_range=(pl, ph))
            # Or other rescaling here to approximate ~ N(0, 1).
            imgs[i] = 2 * imgs[i] / imgs[i].max() - 1.
    if isinstance(imb, tuple):
        labels = nn.utils.floatX(labels[:, :, x:x + sz, y:y + sz])
        return imgs, labels
    return imgs
def segment_image(im, parameter_object):

    dims, rows, cols = im.shape

    image2segment = np.dstack((rescale_intensity(im[0],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255)),
                               rescale_intensity(im[1],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255)),
                               rescale_intensity(im[2],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255))))

    felzer = felzenszwalb(np.uint8(image2segment),
                          scale=50,
                          sigma=.01,
                          min_size=5,
                          multichannel=True).reshape(rows, cols)

    props = regionprops(felzer)
    props = np.array([p.area for p in props], dtype='uint64')

    return fill_labels(np.uint64(felzer), props)
def get_hog(image):
    image = color.rgb2gray(image)
    imgplot = plt.imshow(image, cmap=plt.cm.gray)
    fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                        cells_per_block=(1, 1), visualise=True)
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    return hog_image_rescaled
def process_image(orig_image_arr):
    ratio = orig_image_arr.shape[0] / 300.0

    display_image_arr = normalize_contrs(orig_image_arr, crop_display(orig_image_arr))
    # The display image is now segmented.
    gry_disp_arr = cv2.cvtColor(display_image_arr, cv2.COLOR_BGR2GRAY)
    gry_disp_arr = exposure.rescale_intensity(gry_disp_arr, out_range=(0, 255))

    # Thresholding
    ret, thresh = cv2.threshold(gry_disp_arr, 127, 255, cv2.THRESH_BINARY)
    return thresh
def rescale(img):
    """Rescale image intensity, treating the full 'uint16' range
    (0, 65535) as the input range.

    :param img: image
    :type img: numpy.ndarray
    :returns: numpy.ndarray -- the rescaled image
    """
    return exposure.rescale_intensity(img, in_range='uint16')
def rescale_intensity(img, val=None):
    # Note: `val` is accepted but unused; the image is stretched from its
    # own min/max to the full range of its dtype.
    return exposure.rescale_intensity(img)
def clip(img, range):
    return exposure.rescale_intensity(img, in_range=range)
def _color_correction(self, band, band_id, low, coverage):
    if self.bands == [4, 5]:
        return band
    else:
        print("Color correcting band " + band_id)
        p_low, cloud_cut_low = self._percent_cut(band, low, 100 - (coverage * 3 / 4))
        temp = numpy.zeros(numpy.shape(band), dtype=numpy.uint16)
        cloud_divide = 65000 - coverage * 100
        mask = numpy.logical_and(band < cloud_cut_low, band > 0)
        temp[mask] = rescale_intensity(band[mask],
                                       in_range=(p_low, cloud_cut_low),
                                       out_range=(256, cloud_divide))
        temp[band >= cloud_cut_low] = rescale_intensity(band[band >= cloud_cut_low],
                                                        out_range=(cloud_divide, 65535))
        return temp
def segmenter_data_transform(imb, shift=0, rotate=0, scale=0,
                             normalize_pctwise=(20, 95), istest=False):
    if isinstance(imb, tuple) and len(imb) == 2:
        imgs, labels = imb
    else:
        imgs = imb
    # Rotate the image if training.
    if rotate > 0:
        for i in range(imgs.shape[0]):
            degrees = rotate if istest else np.clip(np.random.normal(), -2, 2) * rotate
            imgs[i, 0] = scipy.misc.imrotate(imgs[i, 0], degrees, interp='bilinear')
            if isinstance(imb, tuple):
                labels[i, 0] = scipy.misc.imrotate(labels[i, 0], degrees, interp='bilinear')
    # Rescale.
    if scale > 0:
        assert 0 < scale <= 0.5
        for i in range(imgs.shape[0]):
            sc = 1 + (scale if istest else np.clip(np.random.normal(), -2, 2) * scale)
            imgs[i, 0] = rescale(imgs[i, 0], sc)
            if isinstance(imb, tuple):
                labels[i, 0] = rescale(labels[i, 0], sc)
    # Shift.
    if shift > 0 and not istest:
        for i in range(imgs.shape[0]):
            x, y = np.random.randint(-shift, shift, 2)
            imgs[i, 0] = img_shift(imgs[i, 0], (x, y))
            if isinstance(imb, tuple):
                labels[i, 0] = img_shift(labels[i, 0], (x, y))

    imgs = nn.utils.floatX(imgs) / 255.0
    for i in range(imgs.shape[0]):
        pclow, pchigh = normalize_pctwise
        if isinstance(pclow, tuple):
            pclow = np.random.randint(pclow[0], pclow[1])
            pchigh = np.random.randint(pchigh[0], pchigh[1])
        pl, ph = np.percentile(imgs[i], (pclow, pchigh))
        imgs[i] = exposure.rescale_intensity(imgs[i], in_range=(pl, ph))
        imgs[i] = 2 * imgs[i] / imgs[i].max() - 1.

    if isinstance(imb, tuple):
        labels = nn.utils.floatX(labels) / 255.0
        return imgs, labels
    else:
        return imgs
def saliency(i_info, parameter_object, i_sect, j_sect, n_rows, n_cols):
    """
    References:
        Federico Perazzi, Philipp Krahenbuhl, Yael Pritch, Alexander Hornung.
            (2012). Saliency Filters: Contrast Based Filtering for Salient
            Region Detection. IEEE CVPR, Providence, Rhode Island, USA,
            June 16-21.
            https://graphics.ethz.ch/~perazzif/saliency_filters/

        Ming-Ming Cheng, Niloy J. Mitra, Xiaolei Huang, Philip H. S. Torr,
            Shi-Min Hu. (2015). Global Contrast based Salient Region
            Detection. IEEE TPAMI.
    """

    # min_max = sputilities.get_layer_min_max(i_info)
    min_max = [(parameter_object.image_min, parameter_object.image_max)] * 3

    if parameter_object.vis_order == 'bgr':
        lidx = [2, 1, 0]
    else:
        lidx = [0, 1, 2]

    # Read the section.
    layers = i_info.read(bands2open=[1, 2, 3],
                         i=i_sect, j=j_sect,
                         rows=n_rows, cols=n_cols,
                         d_type='float32')

    layers = scale_rgb(layers, min_max, lidx)

    # Transpose the image to RGB
    layers = layers.transpose(1, 2, 0)

    # Perform RGB to CIE Lab color space conversion
    layers = rgb2rgbcie(layers)

    # Compute Lab average values
    # lm = layers[:, :, 0].mean(axis=0).mean()
    # am = layers[:, :, 1].mean(axis=0).mean()
    # bm = layers[:, :, 2].mean(axis=0).mean()
    lm = parameter_object.lab_means[0]
    am = parameter_object.lab_means[1]
    bm = parameter_object.lab_means[2]

    return np.uint8(rescale_intensity((layers[:, :, 0] - lm)**2. +
                                      (layers[:, :, 1] - am)**2. +
                                      (layers[:, :, 2] - bm)**2.,
                                      in_range=(-1, 1),
                                      out_range=(0, 255)))
def get_orb_keypoints(bd, image_min, image_max):
    """
    Computes the ORB key points

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
    """
    # We want odd patch sizes.
    # if parameter_object.scales[-1] % 2 == 0:
    #     patch_size = parameter_object.scales[-1] - 1

    if bd.dtype != 'uint8':
        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min, image_max),
                                        out_range=(0, 255)))

    patch_size = 31
    patch_size_d = patch_size * 3

    # Initiate the ORB detector.
    orb = cv2.ORB_create(nfeatures=int(.25 * (bd.shape[0] * bd.shape[1])),
                         edgeThreshold=patch_size,
                         scaleFactor=1.2,
                         nlevels=8,
                         patchSize=patch_size,
                         WTA_K=4,
                         scoreType=cv2.ORB_FAST_SCORE)

    # Add padding because ORB ignores edges.
    bd = cv2.copyMakeBorder(bd, patch_size_d, patch_size_d, patch_size_d,
                            patch_size_d, cv2.BORDER_REFLECT)

    # Compute ORB keypoints.
    key_points = orb.detectAndCompute(bd, None)[0]

    # img = cv2.drawKeypoints(np.uint8(ch_bd), key_points, np.uint8(ch_bd).copy())

    return fill_key_points(np.float32(bd), key_points)[patch_size_d:-patch_size_d,
                                                       patch_size_d:-patch_size_d]
def convolve_gabor(bd, image_min, image_max, scales):
    """
    Convolves an image with a series of Gabor kernels

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
        scales (1d array like)
    """
    if bd.dtype != 'uint8':
        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min, image_max),
                                        out_range=(0, 255)))

    # Each set of Gabor kernels has 8 orientations.
    out_block = np.empty((8 * len(scales), bd.shape[0], bd.shape[1]),
                         dtype='float32')

    ki = 0
    for scale in scales:
        # Check for even or odd scale size.
        if scale % 2 == 0:
            ssub = 1
        else:
            ssub = 0

        gabor_kernels = prep_gabor(kernel_size=(scale - ssub, scale - ssub))

        for kernel in gabor_kernels:
            # TODO: pad array?
            out_block[ki] = cv2.filter2D(bd, cv2.CV_32F, kernel)
            ki += 1

    return out_block
def get_stomata(max_proj_image, min_obj_size=200, max_obj_size=1000):
    """Performs image segmentation from a max_proj_image.
    Keeps only objects whose size is within min_obj_size to max_obj_size.

    :param max_proj_image: the maximum projection image
    :type max_proj_image: numpy.ndarray, uint16
    :param min_obj_size: minimum size of object to keep
    :type min_obj_size: int
    :param max_obj_size: maximum size of object to keep
    :type max_obj_size: int
    :returns: list of [[coordinates of kept objects - list of slice objects],
        binary object image - numpy.ndarray,
        labelled object image - numpy.ndarray]
    """
    # pore_margin = 10
    # max_obj_size = 1000
    # min_obj_size = 200
    # for prop, value in segment_options:
    #     if prop == 'pore_margin':
    #         pore_margin = value
    #     if prop == 'max_obj_size':
    #         max_obj_size = value
    #     if prop == 'min_obj_size':
    #         min_obj_size = value

    # rescale_min = 50
    # rescale_max = 100
    # rescaled = exposure.rescale_intensity(max_proj_image,
    #                                       in_range=(rescale_min, rescale_max))
    rescaled = max_proj_image
    seed = np.copy(rescaled)
    seed[1:-1, 1:-1] = rescaled.max()
    # mask = rescaled

    # if gamma is not None:
    #     rescaled = exposure.adjust_gamma(max_proj_image, gamma)
    # filled = reconstruction(seed, mask, method='erosion')

    closed = dilation(rescaled)
    seed = np.copy(closed)
    seed[1:-1, 1:-1] = closed.max()
    mask = closed

    filled = reconstruction(seed, mask, method='erosion')

    label_objects, nb_labels = ndimage.label(filled)
    sizes = np.bincount(label_objects.ravel())
    mask_sizes = (sizes > min_obj_size) & (sizes < max_obj_size)
    mask_sizes[0] = 0
    big_objs = mask_sizes[label_objects]
    stomata, _ = ndimage.label(big_objs)
    obj_slices = ndimage.find_objects(stomata)

    return [obj_slices, big_objs, stomata]