The following 50 code examples, extracted from open source Python projects, illustrate how to use scipy.misc.imread().
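Before the project examples, here is a minimal sketch of the basic call, assuming a hypothetical file photo.jpg and a SciPy version older than 1.2 (scipy.misc.imread was deprecated in SciPy 1.0 and removed in 1.2; it depends on Pillow, and imageio.imread is the usual drop-in replacement on newer versions):

from scipy import misc

# Read the image into a NumPy array; mode='RGB' forces three channels.
# 'photo.jpg' is a hypothetical path used only for illustration.
img = misc.imread('photo.jpg', mode='RGB')
print(img.shape, img.dtype)  # e.g. (480, 640, 3) uint8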
def showData(self):
    print('Generating word cloud...')
    mask = imread(self.picfile)
    imgcolor = ImageColorGenerator(mask)
    wcc = WordCloud(font_path='./msyhl.ttc',
                    mask=mask,
                    background_color='white',
                    max_font_size=200,
                    max_words=300,
                    color_func=imgcolor)
    wc = wcc.generate_from_frequencies(self.data)
    plt.figure()
    plt.imshow(wc)
    plt.axis('off')
    print('Done')
    plt.show()
def main():
    args.input_data_dir = os.path.abspath(args.input_data_dir)
    if not os.path.exists(args.output_data_dir):
        os.mkdir(args.output_data_dir)

    for dir_path, dir_names, file_names in os.walk(args.input_data_dir):
        if len(file_names) > 0:
            print(dir_path)
            rows = int(math.ceil(len(file_names) / 6.0))
            print(rows)
            fig, axes = plt.subplots(4, 12, subplot_kw={'xticks': [], 'yticks': []})
            fig.subplots_adjust(hspace=0.01, wspace=0.01)
            for ax, file_name in zip(axes.flat, file_names):
                print(file_name)
                img = imread(dir_path + '/' + file_name)
                ax.imshow(img)
                # ax.set_title(os.path.splitext(file_name)[0].replace('.227x227', ''))
            plt.savefig(args.output_data_dir + dir_path.replace(args.input_data_dir, '') + '.pdf')
def __getitem__(self, index):
    """__getitem__

    :param index:
    """
    img_path = self.files[self.split][index].rstrip()
    lbl_path = os.path.join(self.annotations_base,
                            os.path.basename(img_path)[:-4] + '.png')

    img = m.imread(img_path)
    img = np.array(img, dtype=np.uint8)

    lbl = m.imread(lbl_path)
    lbl = np.array(lbl, dtype=np.uint8)

    if self.is_transform:
        img, lbl = self.transform(img, lbl)

    return img, lbl
def __getitem__(self, index):
    img_name = self.files[self.split][index]
    img_path = self.root + '/' + self.split + '/' + img_name
    lbl_path = self.root + '/' + self.split + 'annot/' + img_name

    img = m.imread(img_path)
    img = np.array(img, dtype=np.uint8)

    lbl = m.imread(lbl_path)
    lbl = np.array(lbl, dtype=np.int8)

    if self.augmentations is not None:
        img, lbl = self.augmentations(img, lbl)

    if self.is_transform:
        img, lbl = self.transform(img, lbl)

    return img, lbl
def __getitem__(self, index):
    """__getitem__

    :param index:
    """
    img_path = self.files[self.split][index].rstrip()
    lbl_path = os.path.join(self.annotations_base,
                            img_path.split(os.sep)[-2],
                            os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png')

    img = m.imread(img_path)
    img = np.array(img, dtype=np.uint8)

    lbl = m.imread(lbl_path)
    lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))

    if self.augmentations is not None:
        img, lbl = self.augmentations(img, lbl)

    if self.is_transform:
        img, lbl = self.transform(img, lbl)

    return img, lbl
def data_augmentation(image_files, dir):
    image_list = []
    new_file_name = dir
    save_dir = "xxx" + new_file_name
    for image_file in image_files:
        image_list.append(misc.imread(image_file))
    for image in image_list:
        x = img_to_array(image)  # this is a Numpy array with shape (3, 150, 150)
        x = x.reshape((1,) + x.shape)  # this is a Numpy array with shape (1, 3, 150, 150)
        i = 0
        for batch in datagen.flow(x, batch_size=1,
                                  save_to_dir=save_dir,
                                  save_prefix=dir,
                                  save_format='jpg'):
            i += 1
            if i > 99:
                break
    return image_list


# List all the files
def get_image(filepath, image_target, image_size):
    img = imread(filepath).astype(np.float)
    h_origin, w_origin = img.shape[:2]

    if image_target > h_origin or image_target > w_origin:
        image_target = min(h_origin, w_origin)

    h_drop = int((h_origin - image_target) / 2)
    w_drop = int((w_origin - image_target) / 2)

    if img.ndim == 2:
        img = np.tile(img.reshape(h_origin, w_origin, 1), (1, 1, 3))

    img_crop = img[h_drop:h_drop + image_target, w_drop:w_drop + image_target, :]
    img_resize = imresize(img_crop, [image_size, image_size])

    return np.array(img_resize) / 127.5 - 1.
def test():
    path_text_for = 'D171.png'
    path_text_back = 'D771.png'

    # image foreground/background
    im_for = misc.imread(path_text_for)
    im_back = misc.imread(path_text_back)

    size = im_for.shape
    s = size[0]  # size of the image (squared matrix)

    # number of images
    nbr_ims = 10
    train = True

    # generating the images
    data, data_labels = generate_brodatz_texture(nbr_ims, s, im_back, im_for)

    if train:  # train
        sio.savemat('../data/train.mat',
                    dict([('x_train', data), ('y_train', data_labels)]))
    else:  # test
        sio.savemat('../data/test.mat',
                    dict([('x_test', data), ('y_test', data_labels)]))
def PrepareDataList(BASE, length):
    List = []
    for M in range(0, min(length, len(BASE))):
        img, text = BASE[M]
        image = misc.imread(img, mode='RGB')
        # image = misc.imresize(image, [227, 227])
        r1 = []
        if isfile(text):
            f = open(text, 'r')
            s = f.readline()
            st = s.split(' ')
            for i in range(0, 2):
                r1.append(int(st[i]))
            f.close()
        else:
            # If there is no txt file - "no bird situation"
            r1.append(0)
            r1.append(0)
        List.append([image, r1])
    return List


# Random test and train list
def resize_images(prms):
    seqNum = range(11)
    rawStr = ['rawLeftImFile', 'rawRightImFile']
    imStr = ['leftImFile', 'rightImFile']
    num = ku.get_num_images()
    for raw, new in zip(rawStr, imStr):
        for seq in seqNum:
            N = num[seq]
            print seq, N, raw, new
            rawNames = [prms['paths'][raw] % (seq, i) for i in range(N)]
            newNames = [prms['paths'][new] % (seq, i) for i in range(N)]
            dirName = os.path.dirname(newNames[0])
            if not os.path.exists(dirName):
                os.makedirs(dirName)
            for rawIm, newIm in zip(rawNames, newNames):
                im = scm.imread(rawIm)
                im = scm.imresize(im, [256, 256])
                scm.imsave(newIm, im)

##
# Save images as jpgs.
def save_as_jpg(prms):
    seqNum = range(11)
    rawStr = ['rawLeftImFile', 'rawRightImFile']
    imStr = ['leftImFile', 'rightImFile']
    num = ku.get_num_images()
    for raw, new in zip(rawStr, imStr):
        for seq in seqNum:
            N = num[seq]
            print seq, N, raw, new
            rawNames = [prms['paths'][raw] % (seq, i) for i in range(N)]
            newNames = [prms['paths'][new] % (seq, i) for i in range(N)]
            dirName = os.path.dirname(newNames[0])
            if not os.path.exists(dirName):
                os.makedirs(dirName)
            for rawIm, newIm in zip(rawNames, newNames):
                im = scm.imread(rawIm)
                scm.imsave(newIm, im)

##
# Get the names of images
def preprocess(image_dir, new_image_dir, preprocess_fn):
    image_paths = []
    labels = []
    if os.path.isdir(new_image_dir):
        rmtree(new_image_dir)
    os.makedirs(new_image_dir)
    classes = os.listdir(image_dir)
    for clas in classes:
        class_dir = os.path.join(image_dir, str(clas))
        new_class_dir = os.path.join(new_image_dir, str(clas))
        os.makedirs(new_class_dir)
        for image_name in os.listdir(class_dir):
            image = misc.imread(os.path.join(class_dir, image_name))
            image = preprocess_fn(image)
            misc.imsave(os.path.join(new_class_dir, image_name), image)
def process_one(image_dir, page_dir, output_dir, basename, colormap, color_labels):
    image_filename = os.path.join(image_dir, "{}.jpg".format(basename))
    page_filename = os.path.join(page_dir, "{}.xml".format(basename))

    page = PAGE.parse_file(page_filename)
    text_lines = [tl for tr in page.text_regions for tl in tr.text_lines]
    graphic_regions = page.graphic_regions

    img = imread(image_filename, mode='RGB')
    gt = np.zeros_like(img[:, :, 0])

    mask1 = cv2.fillPoly(gt.copy(),
                         [PAGE.Point.list_to_cv2poly(tl.coords)
                          for tl in text_lines if 'comment' in tl.id], 1)
    mask2 = cv2.fillPoly(gt.copy(),
                         [PAGE.Point.list_to_cv2poly(tl.coords)
                          for tl in text_lines if 'comment' not in tl.id], 1)
    mask3 = cv2.fillPoly(gt.copy(),
                         [PAGE.Point.list_to_cv2poly(tl.coords)
                          for tl in graphic_regions], 1)
    arr = np.dstack([mask1, mask2, mask3])

    gt_img = convert_array_masks(arr, colormap, color_labels)

    save_and_resize(img, os.path.join(output_dir, 'images', '{}.jpg'.format(basename)))
    save_and_resize(gt_img, os.path.join(output_dir, 'labels', '{}.png'.format(basename)), nearest=True)
def load_data(batch_alpha_paths, batch_eps_paths, batch_BG_paths):
    batch_size = batch_alpha_paths.shape[0]
    train_batch = []
    images_without_mean_reduction = []
    for i in range(batch_size):
        alpha = misc.imread(batch_alpha_paths[i], 'L').astype(np.float32)
        eps = misc.imread(batch_eps_paths[i]).astype(np.float32)
        BG = misc.imread(batch_BG_paths[i]).astype(np.float32)
        batch_i, raw_RGB = preprocessing_single(alpha, BG, eps, batch_alpha_paths[i])
        train_batch.append(batch_i)
        images_without_mean_reduction.append(raw_RGB)
    train_batch = np.stack(train_batch)
    return (train_batch[:, :, :, :3],
            np.expand_dims(train_batch[:, :, :, 3], 3),
            np.expand_dims(train_batch[:, :, :, 4], 3),
            train_batch[:, :, :, 5:8],
            train_batch[:, :, :, 8:],
            images_without_mean_reduction)
def load_alphamatting_data(test_alpha):
    rgb_path = os.path.join(test_alpha, 'rgb')
    trimap_path = os.path.join(test_alpha, 'trimap')
    alpha_path = os.path.join(test_alpha, 'alpha')
    images = os.listdir(trimap_path)
    test_num = len(images)
    all_shape = []
    rgb_batch = []
    tri_batch = []
    alp_batch = []
    for i in range(test_num):
        rgb = misc.imread(os.path.join(rgb_path, images[i]))
        trimap = misc.imread(os.path.join(trimap_path, images[i]), 'L')
        alpha = misc.imread(os.path.join(alpha_path, images[i]), 'L') / 255.0
        all_shape.append(trimap.shape)
        rgb_batch.append(misc.imresize(rgb, [320, 320, 3]) - g_mean)
        trimap = misc.imresize(trimap, [320, 320], interp='nearest').astype(np.float32)
        tri_batch.append(np.expand_dims(trimap, 2))
        alp_batch.append(alpha)
    return np.array(rgb_batch), np.array(tri_batch), np.array(alp_batch), all_shape, images
def load_validation_data(vali_root):
    alpha_dir = os.path.join(vali_root, 'alpha')
    RGB_dir = os.path.join(vali_root, 'RGB')
    images = os.listdir(alpha_dir)
    test_num = len(images)
    all_shape = []
    rgb_batch = []
    tri_batch = []
    alp_batch = []
    for i in range(test_num):
        rgb = misc.imread(os.path.join(RGB_dir, images[i]))
        alpha = misc.imread(os.path.join(alpha_dir, images[i]), 'L')
        trimap = generate_trimap(np.expand_dims(np.copy(alpha), 2),
                                 np.expand_dims(alpha, 2))[:, :, 0]
        alpha = alpha / 255.0
        all_shape.append(trimap.shape)
        rgb_batch.append(misc.imresize(rgb, [320, 320, 3]) - g_mean)
        trimap = misc.imresize(trimap, [320, 320], interp='nearest').astype(np.float32)
        tri_batch.append(np.expand_dims(trimap, 2))
        alp_batch.append(alpha)
    return np.array(rgb_batch), np.array(tri_batch), np.array(alp_batch), all_shape, images
def main(args):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_fraction)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        saver = tf.train.import_meta_graph('./meta_graph/my-model.meta')
        saver.restore(sess, tf.train.latest_checkpoint('./model'))
        image_batch = tf.get_collection('image_batch')[0]
        GT_trimap = tf.get_collection('GT_trimap')[0]
        pred_mattes = tf.get_collection('pred_mattes')[0]

        rgb = misc.imread(args.rgb)
        alpha = misc.imread(args.alpha, 'L')
        trimap = generate_trimap(np.expand_dims(np.copy(alpha), 2),
                                 np.expand_dims(alpha, 2))[:, :, 0]
        origin_shape = alpha.shape
        rgb = np.expand_dims(
            misc.imresize(rgb.astype(np.uint8), [320, 320, 3]).astype(np.float32) - g_mean, 0)
        trimap = np.expand_dims(np.expand_dims(
            misc.imresize(trimap.astype(np.uint8), [320, 320],
                          interp='nearest').astype(np.float32), 2), 0)

        feed_dict = {image_batch: rgb, GT_trimap: trimap}
        pred_alpha = sess.run(pred_mattes, feed_dict=feed_dict)
        final_alpha = misc.imresize(np.squeeze(pred_alpha), origin_shape)
        # misc.imshow(final_alpha)
        misc.imsave('./alpha.png', final_alpha)
def chooose_ims(batch_size):
    global index
    I1 = np.ndarray(shape=[1, 256, 256, 1])
    U1 = np.ndarray(shape=[1, 256, 256, 1])
    V1 = np.ndarray(shape=[1, 256, 256, 1])
    for i in range(batch_size):
        if index >= len(files):
            index = 0
        image = im.imread("/dataImages/dataset/" + files[index], mode='RGB')
        image = image / 255
        I, U, V = imManipulation.rgb2yuv(image)
        I = np.reshape(I, (1, 256, 256, 1))
        U = np.reshape(U, (1, 256, 256, 1))
        V = np.reshape(V, (1, 256, 256, 1))
        I1 = np.concatenate((I1, I), axis=0)
        U1 = np.concatenate((U1, U), axis=0)
        V1 = np.concatenate((V1, V), axis=0)
        index += 1
    I1 = I1[1:, :, :, :]
    U1 = U1[1:, :, :, :]
    V1 = V1[1:, :, :, :]
    return I1, U1, V1
def _transform(self, filename, flag=False):
    if flag:
        image = np.array(Image.open(filename), dtype=np.uint8)
        image[image == 255] = 21
    else:
        image = misc.imread(filename)

    if self.__channels and len(image.shape) < 3:  # make sure images are of shape (h, w, 3)
        image = np.array([image for i in range(3)])

    if self.image_options.get("resize", False) and self.image_options["resize"]:
        resize_size = int(self.image_options["resize_size"])
        resize_image = misc.imresize(image, [resize_size, resize_size], interp='nearest')
    else:
        resize_image = image

    return np.array(resize_image)
def level_curves(fname, npoints=200, smoothing=10, level=0.5):
    "Loads regularly sampled curves from a .PNG image."
    # Find the contour lines
    img = misc.imread(fname, flatten=True)  # Grayscale
    img = (img.T[:, ::-1]) / 255.
    img = gaussian_filter(img, smoothing, mode='nearest')
    lines = find_contours(img, level)

    # Compute the sampling ratio for every contour line
    lengths = np.array([arclength(line) for line in lines])
    points_per_line = np.ceil(npoints * lengths / np.sum(lengths))

    # Interpolate accordingly
    points = []
    connec = []
    index_offset = 0
    for ppl, line in zip(points_per_line, lines):
        (p, c) = resample(line, ppl)
        points.append(p)
        connec.append(c + index_offset)
        index_offset += len(p)

    size = np.maximum(img.shape[0], img.shape[1])
    points = np.vstack(points) / size
    connec = np.vstack(connec)
    return Curve(points, connec)

# Pyplot Output =================================================================================
def test_recognize(args):
    imdetect = args.detect
    im1 = args.im1
    im2 = args.im2
    payload = {'img': file2base64(imdetect)}
    import numpy as np
    imarr = np.array(misc.imread(imdetect))
    r = requests.get("http://face.icybee.cn/face/face_detect", data=payload)
    print(json.loads(r.text)['boxes'][0])
    box = json.loads(r.text)['boxes'][0]
    box = [int(i) for i in box]
    misc.imsave('sample.jpg', imarr[box[1]:box[3], box[0]:box[2], :])
    payload = {
        'img1': file2base64(im1),
        'img2': file2base64(im2)
    }
    r = requests.get("http://face.icybee.cn/face/face_recognize", data=payload)
    print(r.text)
    # print(json.loads(r.text)['dist'])
def write_data_in_synset_folders(part_data, part, out_dir, image_size):
    part_dir = os.path.join(out_dir, part)
    os.mkdir(part_dir)
    num_wnids = len(part_data)
    for i, (wnid, wnid_data) in enumerate(part_data.iteritems()):
        print 'Writing images for synset %d / %d of %s' % (i + 1, num_wnids, part)
        wnid_dir = os.path.join(part_dir, wnid)
        os.mkdir(wnid_dir)
        image_dir = os.path.join(wnid_dir, 'images')
        os.mkdir(image_dir)
        boxes_filename = os.path.join(wnid_dir, '%s_boxes.txt' % wnid)
        boxes_file = open(boxes_filename, 'w')
        for i, (img_filename, bbox) in enumerate(wnid_data):
            out_img_filename = '%s_%d.JPEG' % (wnid, i)
            full_out_img_filename = os.path.join(image_dir, out_img_filename)
            img = imread(img_filename)
            img_resized, bbox_resized = resize_image(img, image_size, bbox)
            imsave(full_out_img_filename, img_resized)
            boxes_file.write('%s\t%d\t%d\t%d\t%d\n' % (out_img_filename,
                             bbox_resized[0], bbox_resized[1],
                             bbox_resized[2], bbox_resized[3]))
        boxes_file.close()
def image_read(self, imname):
    image = misc.imread(imname, mode='RGB').astype(np.float)
    r, c, ch = image.shape
    if r < 299 or c < 299:
        # TODO: check too small images
        # print "##too small!!"
        image = misc.imresize(image, (299, 299, 3))
    elif r > 299 or c > 299:
        image = image[(r - 299) / 2: (r - 299) / 2 + 299,
                      (c - 299) / 2: (c - 299) / 2 + 299, :]
        # print r, c, image.shape
    assert image.shape == (299, 299, 3)
    image = (image / 255.0) * 2.0 - 1.0
    if self.random_noise:
        add_noise = bool(random.getrandbits(1))
        if add_noise:
            eps = random.choice([4.0, 8.0, 12.0, 16.0]) / 255.0 * 2.0
            noise_image = image + eps * np.random.choice([-1, 1], (299, 299, 3))
            image = np.clip(noise_image, -1.0, 1.0)
    return image
def resizeImg(imgPath, img_size):
    try:
        img = imread(imgPath)
        h, w, _ = img.shape
        scale = 1
        if w >= h:
            new_w = img_size
            if w >= new_w:
                scale = float(new_w) / w
            new_h = int(h * scale)
        else:
            new_h = img_size
            if h >= new_h:
                scale = float(new_h) / h
            new_w = int(w * scale)
        new_img = imresize(img, (new_h, new_w), interp='bilinear')
        imsave(imgPath, new_img)
        print('Img Resized as {}'.format(img_size))
    except Exception as e:
        print(e)
def resizeImg(imgPath, img_size):
    img = imread(imgPath)
    h, w, _ = img.shape
    scale = 1
    if w >= h:
        new_w = img_size
        if w >= new_w:
            scale = float(new_w) / w
        new_h = int(h * scale)
    else:
        new_h = img_size
        if h >= new_h:
            scale = float(new_h) / h
        new_w = int(w * scale)
    new_img = imresize(img, (new_h, new_w), interp='bilinear')
    imsave(imgPath, new_img)

# Download img
# Later we can use multiple worker threads to do this faster
def resizeImg(imgPath, img_size):
    img = imread(imgPath)
    h, w, _ = img.shape
    scale = 1
    if w >= h:
        new_w = img_size
        if w >= new_w:
            scale = float(new_w) / w
        new_h = int(h * scale)
    else:
        new_h = img_size
        if h >= new_h:
            scale = float(new_h) / h
        new_w = int(w * scale)
    new_img = imresize(img, (new_h, new_w), interp='bilinear')
    imsave(imgPath, new_img)
    print('Img Resized as {}'.format(img_size))
def get_rgbd_file(self, dirname, offset):
    associations = self.seq_dir_map[dirname]['associations']

    if associations[offset, 1].startswith('depth'):
        rgb_filename = os.path.join(dirname, associations[offset, 3])
        depth_filename = os.path.join(dirname, associations[offset, 1])
    else:
        rgb_filename = os.path.join(dirname, associations[offset, 1])
        depth_filename = os.path.join(dirname, associations[offset, 3])

    rgb_img = ndimage.imread(rgb_filename)
    depth_img = ndimage.imread(depth_filename)
    width = height = 224

    # Reshape
    depth_img = np.reshape(depth_img, list(depth_img.shape) + [1])
    depth_img = 255 * depth_img / np.max(depth_img)

    rgbd_img = np.concatenate((rgb_img, depth_img), 2)

    # Resize
    rgbd_img = transform.resize(rgbd_img, [width, height], preserve_range=True)

    return rgb_filename, depth_filename, rgbd_img.astype(np.float32)
def read_rgb_image(filepath):
    rgb_img = ndimage.imread(filepath)
    width = height = 224
    img_width = rgb_img.shape[1]
    img_height = rgb_img.shape[0]

    # scale such that smaller dimension is 256
    if img_width < img_height:
        factor = 256.0 / img_width
    else:
        factor = 256.0 / img_height
    rgb_img = transform.rescale(rgb_img, factor, preserve_range=True)

    # crop randomly
    width_start = np.random.randint(0, rgb_img.shape[1] - width)
    height_start = np.random.randint(0, rgb_img.shape[0] - height)
    rgb_img = rgb_img[height_start:height_start + height,
                      width_start:width_start + width]

    return rgb_img
def load_mask(mask_path, shape):
    mask = imread(mask_path, mode="L")  # Grayscale mask load
    width, height, _ = shape
    mask = imresize(mask, (width, height), interp='bicubic').astype('float32')

    # Perform binarization of mask
    mask[mask <= 127] = 0
    mask[mask > 128] = 255

    max = np.amax(mask)
    mask /= max

    return mask


# util function to apply mask to generated image
def preprocess_image(image_path, load_dims=False, style_image=False):
    global img_WIDTH, img_HEIGHT, aspect_ratio, b_scale_ratio_height, b_scale_ratio_width

    img = imread(image_path, mode="RGB")  # Prevents crashes due to PNG images (ARGB)
    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = img_HEIGHT / img_WIDTH

    if style_image:
        b_scale_ratio_width = float(img.shape[0]) / img_WIDTH
        b_scale_ratio_height = float(img.shape[1]) / img_HEIGHT

    img = imresize(img, (img_width, img_height))
    img = img.transpose((2, 0, 1)).astype('float64')
    img = np.expand_dims(img, axis=0)
    return img


# util function to convert a tensor into a valid image
def load_test_data(phone, dped_dir, IMAGE_SIZE):
    test_directory_phone = dped_dir + str(phone) + '/test_data/patches/' + str(phone) + '/'
    test_directory_dslr = dped_dir + str(phone) + '/test_data/patches/canon/'

    NUM_TEST_IMAGES = len([name for name in os.listdir(test_directory_phone)
                           if os.path.isfile(os.path.join(test_directory_phone, name))])

    test_data = np.zeros((NUM_TEST_IMAGES, IMAGE_SIZE))
    test_answ = np.zeros((NUM_TEST_IMAGES, IMAGE_SIZE))

    for i in range(0, NUM_TEST_IMAGES):
        I = np.asarray(misc.imread(test_directory_phone + str(i) + '.jpg'))
        I = np.float16(np.reshape(I, [1, IMAGE_SIZE])) / 255
        test_data[i, :] = I

        I = np.asarray(misc.imread(test_directory_dslr + str(i) + '.jpg'))
        I = np.float16(np.reshape(I, [1, IMAGE_SIZE])) / 255
        test_answ[i, :] = I

        if i % 100 == 0:
            print(str(round(i * 100 / NUM_TEST_IMAGES)) + "% done", end="\r")

    return test_data, test_answ
def predict():
    # get data from drawing canvas and save as image
    parseImage(request.get_data())

    # read parsed image back in 8-bit, black and white mode (L)
    x = imread('output.png', mode='L')
    x = np.invert(x)
    x = imresize(x, (28, 28))

    # reshape image data for use in neural network
    x = x.reshape(1, 28, 28, 1)
    with graph.as_default():
        out = model.predict(x)
        print(out)
        print(np.argmax(out, axis=1))
        response = np.array_str(np.argmax(out, axis=1))
        return response
def process_mot(path):
    '''
    1920 x 1080 -> 384 x 216
    640 x 480 -> 320 x 240
    '''
    images = []
    for dirpath, dirnames, filenames in os.walk(path):
        for filename in filenames:
            if filename[-4:] == ".jpg" and "_ds" not in filename:
                full_path = os.path.join(dirpath, filename)
                img = misc.imread(full_path, mode='RGB')
                if img.shape == LARGE_IMAGE_SIZE:
                    img = misc.imresize(img, size=LARGE_IMAGE_RESCALE)
                    img = pad_image(img, FINAL_IMAGE_SIZE)
                elif img.shape == MEDIUM_IMAGE_SIZE:
                    img = misc.imresize(img, size=MEDIUM_IMAGE_RESCALE)
                    img = pad_image(img, FINAL_IMAGE_SIZE)
                else:
                    print("Unexpected shape " + str(img.shape))
                    continue
                output_filename = os.path.join(dirpath, filename[:-4] + "_ds.jpg")
                misc.imsave(output_filename, img)
                images.append(output_filename)
    return images
def process_vot(path, min_height, min_width):
    images = []
    for dirpath, dirnames, filenames in os.walk(path):
        img_shape = None
        pad_height = 0
        pad_width = 0
        for filename in filenames:
            if filename[-4:] == ".jpg" and "_ds" not in filename:
                full_path = os.path.join(dirpath, filename)
                img = misc.imread(full_path, mode='RGB')
                img_shape = img.shape
                ratio = min(float(min_width) / img.shape[1],
                            float(min_height) / img.shape[0])
                img = misc.imresize(img, size=ratio)
                img, pad_height, pad_width = pad_image(img, (min_height, min_width))
                output_filename = os.path.join(dirpath, filename[:-4] + "_ds.jpg")
                misc.imsave(output_filename, img)
                images.append(output_filename)
        if img_shape:
            gt_path = os.path.join(dirpath, "groundtruth.txt")
            preprocess_label(gt_path, ratio, img_shape, min_height, min_width,
                             pad_height, pad_width)
    return images
def do_roc(self):
    if self.gan_mode and self.dmodel2 is not None:
        dmodel_cur = self.dmodel2
        scale_factor = 2
    elif self.dmodel is not None:
        dmodel_cur = self.dmodel
        scale_factor = self.scale_factor
    else:
        theApp.cur_hist_tex = theApp.standard_hist_tex
        theApp.cur_roc_tex = theApp.standard_roc_tex
        return

    encoded_vector_source = self.get_encoded(dmodel_cur, self.cur_vector_source, scale_factor)
    encoded_vector_dest = self.get_encoded(dmodel_cur, self.cur_vector_dest, scale_factor)
    attribute_vector = encoded_vector_dest - encoded_vector_source
    threshold = None
    outfile = "{}/{}".format(roc_dir, get_date_str())
    do_roc(attribute_vector, encoded, attribs, attribute_index, threshold, outfile)
    hist_img = imread("{}_hist_both.png".format(outfile), mode='RGB')
    roc_img = imread("{}_roc.png".format(outfile), mode='RGB')
    hist_img = imresize(hist_img, roc_image_resize)
    roc_img = imresize(roc_img, roc_image_resize)
    theApp.cur_hist_tex = image_to_texture(hist_img)
    theApp.cur_roc_tex = image_to_texture(roc_img)
def save_images(img_dir, dest_file):
    img_list = os.listdir(img_dir)
    img_combo = []
    print('starting to save ' + str(len(img_list)) + ' images')
    count = 0
    for img_name in img_list:
        # can change this line to img_name.startswith('center') for center imgs
        if not img_name.startswith('.'):
            if count % 500 == 0:
                print('count is', count)
            img = misc.imread(img_dir + '/' + img_name)
            img_combo.append(img)
            count += 1

    # cast to numpy array and save to file
    all_images = np.array(img_combo)
    print('images shape', all_images.shape)
    np.save(dest_file, all_images)
def show_file_images(filename, img_list):
    fig = plt.figure()
    # for 9 random images, print them
    for img_num in range(0, 9):
        random_num = random.randint(0, len(img_list))
        img_name = img_list[random_num]
        print('image name is ', img_name)
        img = misc.imread(filename + img_name)
        np_img = np.array(img)
        flipped_img = np.fliplr(np_img)[60:160]
        # print('img is ', img)
        img = img[60:160]
        fig.add_subplot(5, 5, img_num * 2 + 1)
        plt.imshow(img)
        fig.add_subplot(5, 5, img_num * 2 + 2)
        plt.imshow(flipped_img)
    plt.show()
def count_images(img_dir):
    # add each to img_combo
    img_list = os.listdir(img_dir)
    l_count = 0
    c_count = 0
    r_count = 0
    for img_name in img_list:
        if img_name.startswith('center'):
            c_count += 1
        elif img_name.startswith('left'):
            l_count += 1
        elif img_name.startswith('right'):
            r_count += 1
        # img = misc.imread(img_dir + '/' + img_name)
        # img_combo.append(img)
    print('counts l, c, r:', l_count, c_count, r_count)
def get_images_from_request(request_file, names):
    """get pillow images from flask request

    @input: request_file: request.files
    @input: names: image name list for read
    @output: type ndarray. The array obtained by reading the image.
    """
    img_list = []
    for name in names:
        # get upload file
        f = request_file.get(name)
        if f is None:
            continue
        img = misc.imread(f)
        img_list.append(img)
    return img_list
def view_(_pred, _lable):
    fname = ['Captcha/lv3/%i.jpg' % i for i in range(20)]
    img = []
    for fn in fname:
        img.append(Image.open(open(fn)))
        # img.append(misc.imread(fn).astype(np.float))
    for i in range(len(img)):
        pylab.subplot(4, 5, i + 1)
        pylab.axis('off')
        pylab.imshow(img[i])
        # pylab.imshow(np.dot(np.array(img[i])[..., :3], [0.299, 0.587, 0.114]), cmap=plt.get_cmap("gray"))
        # pylab.text(40, 60, _pred[i], color='b')
        if _pred[i] == _lable[i]:
            pylab.text(40, 65, _pred[i], color='b', size=15)
        else:
            pylab.text(40, 65, _pred[i], color='r', size=15)
            pylab.text(40, 92, _lable[i], color='g', size=15)
    pylab.show()
def get_batch(generator_type, set_type, height, width):
    imgs = []
    if set_type == 'train' or set_type == 'val':
        for paths, bbs, labels in generator_type:
            for i in range(len(paths)):
                img = gray2rgb(misc.imread(paths[i]), paths[i])
                img = img[bbs[i][1]:bbs[i][1] + bbs[i][3],
                          bbs[i][0]:bbs[i][0] + bbs[i][2], :]
                img = preprocess_image(img, height, width, set_type)
                imgs.append(img)
            imgs = np.asarray(imgs)
            break
        return imgs, labels
    else:
        for paths, bbs in generator_type:
            for i in range(len(paths)):
                img = gray2rgb(misc.imread(paths[i]), paths[i])
                img = img[bbs[i][1]:bbs[i][1] + bbs[i][3],
                          bbs[i][0]:bbs[i][0] + bbs[i][2], :]
                imgs.append(preprocess_image(img, height, width, set_type))
            imgs = np.asarray(imgs)
            break
        return imgs, None


# store in required csv format
def get_img(data_path):
    # Getting image array from path:
    img = imread(data_path)
    img = imresize(img, (64, 64))
    return img
def readimg(img_path):
    img = misc.imread(img_path, mode='RGB')
    img = misc.imresize(img, (160, 160))
    img = facenet.prewhiten(img)
    img = np.expand_dims(img, axis=0)
    return img
def get_embedding(img_path):
    img = misc.imread(img_path, mode='RGB')
    # judge alignment
    aligned = align.align(160, img, [0, 0, img.shape[1], img.shape[0]],
                          landmarkIndices=landmarkIndices)
    img = facenet.prewhiten(img)
    img = np.expand_dims(img, axis=0)
    aligned = facenet.prewhiten(aligned)
    aligned = np.expand_dims(aligned, axis=0)

    # Run forward pass to calculate embeddings
    feed_dict = {images_placeholder: img, phase_train_placeholder: False}
    emb = sess.run(embeddings, feed_dict=feed_dict)

    # Run forward pass to calculate embeddings
    feed_dict_aligned = {images_placeholder: aligned, phase_train_placeholder: False}
    emb_aligned = sess.run(embeddings, feed_dict=feed_dict_aligned)

    return emb.ravel(), emb_aligned.ravel()

# # for test
# import os
# from time import time
# def main(dir_path):
#     img_all = os.listdir(dir_path)
#     for f in img_all:
#         start = time()
#         embedding_result = get_embedding(os.path.join(dir_path, f))
#         print time() - start
#         print embedding_result
# # main('./data')
def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
    nrof_samples = len(image_paths)
    images = np.zeros((nrof_samples, image_size, image_size, 3))
    for i in range(nrof_samples):
        img = misc.imread(image_paths[i])
        if img.ndim == 2:
            img = to_rgb(img)
        if do_prewhiten:
            img = prewhiten(img)
        img = crop(img, do_random_crop, image_size)
        img = flip(img, do_random_flip)
        images[i, :, :, :] = img
    return images
def load_image(img_file_path):
    img = imread(img_file_path)
    img = (imresize(img, (227, 227))[:, :, :3]).astype(float32)
    img = img - mean(img)
    return img
def load_images(image_names):
    imgs = []
    for img_name in image_names:
        img = imread(img_name)
        img = (imresize(img, (227, 227))[:, :, :3]).astype(float32)
        img = img - mean(img)
        imgs.append(img)
    return imgs
def main():
    img = imread(args.input_path)
    img = ndimage.rotate(img, args.angle, mode=args.mode)
    misc.imsave(args.output_path, img)