We extracted the following 50 code examples from open-source Python projects to illustrate how to use PIL.Image().
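Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the file names are hypothetical) of the PIL.Image calls most of them rely on: opening a file, converting between PIL images and NumPy arrays, resizing, and saving.

import numpy as np
import PIL.Image

img = PIL.Image.open('input.jpg')                # hypothetical input file
img = img.convert('RGB').resize((128, 128), PIL.Image.BICUBIC)
arr = np.asarray(img)                            # PIL.Image -> numpy array (H x W x 3, uint8)
out = PIL.Image.fromarray(arr)                   # numpy array -> PIL.Image
out.save('output.png')                           # output format inferred from the extension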
def predict(image, the_net):
    inputs = []
    try:
        tmp_input = image
        tmp_input = cv2.resize(tmp_input, (SIZE, SIZE))
        tmp_input = tmp_input[11:11+128, 11:11+128]
        tmp_input = np.subtract(tmp_input, mean)
        tmp_input = tmp_input.transpose((2, 0, 1))
        tmp_input = np.require(tmp_input, dtype=np.float32)
    except Exception as e:
        raise Exception("Image damaged or illegal file format")
    the_net.blobs['data'].reshape(1, *tmp_input.shape)
    the_net.reshape()
    the_net.blobs['data'].data[...] = tmp_input
    the_net.forward()
    scores = the_net.blobs['prob'].data[0]
    return copy.deepcopy(scores)
def predict(image, the_net):
    inputs = []
    try:
        tmp_input = image
        tmp_input = cv2.resize(tmp_input, (SIZE, SIZE))
        tmp_input = tmp_input[13:13+224, 13:13+224]
        tmp_input = np.subtract(tmp_input, mean)
        tmp_input = tmp_input.transpose((2, 0, 1))
        tmp_input = np.require(tmp_input, dtype=np.float32)
    except Exception as e:
        raise Exception("Image damaged or illegal file format")
    the_net.blobs['data'].reshape(1, *tmp_input.shape)
    the_net.reshape()
    the_net.blobs['data'].data[...] = tmp_input
    the_net.forward()
    scores = the_net.blobs['prob'].data[0]
    return copy.deepcopy(scores)
def show(self, image, tshape):
    '''Converts to PIL.Image.

    Args:
        image (numpy.array)
        tshape (tuple)

    Returns:
        PIL.Image: image to visualize.
    '''
    fshape = self.image_shape
    X = image.T
    return PIL.Image.fromarray(tile_raster_images(
        X=X,
        img_shape=fshape,
        tile_shape=tshape,
        tile_spacing=(1, 1)))
def vis_square(data, padsize=1, padval=0):
    data -= data.min()
    data /= data.max()

    # Force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    # UNCOMMENT_THIS: data = np.pad(data, padding, mode='reflect', constant_values=(padval, padval))
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))

    # Tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])

    # If you want to use showimage() for static images, comment out the if and return statements
    if data.ndim == 3:
        data = data[:, :, ::-1]
    return data
    # showimage(data)

# Plot the last image and conv1 layer's weights and responses
def supic_process(input_path, output_dir, out_width, out_height):
    """Customized api for processing an input image."""
    try:
        enhancer = NeuralEnhancer(loader=False)
        img = scipy.ndimage.imread(input_path, mode='RGB')
        out = enhancer.process(img)
        out = out.resize((out_width, out_height), PIL.Image.BICUBIC)
        name = hex(int(time.time() * 100000))[2:]
        output_path = os.path.join(output_dir, name + '.png')
        out.save(output_path)
        return output_path
    except Exception as e:
        return '!ERROR' + str(e)
def generate_thumb(self, size, orig_resource, thumb_resource):
    with orig_resource.cache_open() as orig:
        im = self.Image.open(orig)
        im.thumbnail(size)
        with thumb_resource.cache_open('wb') as target:
            if thumb_resource.typestring.ts_format == 'thumb.jpg':
                # Ensure it has no alpha before saving
                p_mode_alpha = (im.mode == 'P' and 'transparency' in im.info)
                if im.mode in ('RGBA', 'LA') or p_mode_alpha:
                    alpha = im.convert('RGBA').split()[-1]
                    no_alpha = self.Image.new("RGB", im.size, (255, 255, 255))
                    no_alpha.paste(im, mask=alpha)
                    no_alpha.save(target, 'JPEG')
                else:
                    im.save(target, 'JPEG')
            else:
                # Save as is
                im.save(target)
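The alpha-handling branch above is a general trick for saving transparent images as JPEG (which has no alpha channel): extract the alpha band and paste the image onto an opaque background. A standalone sketch of the same idea, with a hypothetical 'logo.png' input:

from PIL import Image

im = Image.open('logo.png')                            # hypothetical image with transparency
p_mode_alpha = (im.mode == 'P' and 'transparency' in im.info)
if im.mode in ('RGBA', 'LA') or p_mode_alpha:
    alpha = im.convert('RGBA').split()[-1]             # the alpha band
    flat = Image.new('RGB', im.size, (255, 255, 255))  # opaque white background
    flat.paste(im, mask=alpha)
    flat.save('logo.jpg', 'JPEG')
else:
    im.convert('RGB').save('logo.jpg', 'JPEG')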
def test_pil_plugins(pyi_builder):
    pyi_builder.test_source(
        """
        # Verify packaging of PIL.Image. Specifically, the hidden import of FixTk
        # importing tkinter is causing some problems.
        from PIL.Image import fromstring
        print(fromstring)

        # PIL import hook should bundle all available PIL plugins. Verify that plugins
        # are bundled.
        from PIL import Image
        Image.init()
        MIN_PLUG_COUNT = 7  # Without all plugins the count is usually 6.
        plugins = list(Image.SAVE.keys())
        plugins.sort()
        if len(plugins) < MIN_PLUG_COUNT:
            raise SystemExit('No PIL image plugins were bundled!')
        else:
            print('PIL supported image formats: %s' % plugins)
        """)
def test_image_resizing(self):
    image = Image(organization=self.organization)
    file = ContentFile(self.create_random_image(100, 100).read())
    image.file.save('random_image.png', file, save=False)
    image.save()
    image_file = PIL.Image.open(image.file.file)
    self.assertEqual((100, 100), image_file.size)

    image = Image(organization=self.organization)
    file = ContentFile(self.create_random_image(100, 100).read())
    image.file.save('random_image.png', file, save=False)
    image.width = 50
    image.save()
    image_file = PIL.Image.open(image.file.file)
    self.assertEqual((50, 50), image_file.size)
def plot_image(image):
    # Assume the pixel-values are scaled between 0 and 255.
    if False:
        # Convert the pixel-values to the range between 0.0 and 1.0
        image = np.clip(image / 255.0, 0.0, 1.0)

        # Plot using matplotlib.
        plt.imshow(image, interpolation='lanczos')
        plt.show()
    else:
        # Ensure the pixel-values are between 0 and 255.
        image = np.clip(image, 0.0, 255.0)

        # Convert pixels to bytes.
        image = image.astype(np.uint8)

        # Convert to a PIL-image and display it.
        display(PIL.Image.fromarray(image))

# Normalize an image so its values are between 0.0 and 1.0. This is useful for plotting the gradient.
def loadPIL_LUT(self, dataset):
    if not have_PIL:
        raise ImportError("Python Imaging Library is not available. See http://www.pythonware.com/products/pil/ to download and install")
    if 'PixelData' not in dataset:
        raise TypeError("Cannot show image -- DICOM dataset does not have pixel data")
    if ('WindowWidth' not in dataset) or ('WindowCenter' not in dataset):  # can only apply LUT if these values exist
        bits = dataset.BitsAllocated
        samples = dataset.SamplesPerPixel
        if bits == 8 and samples == 1:
            mode = "L"
        elif bits == 8 and samples == 3:
            mode = "RGB"
        elif bits == 16:
            # not sure about this -- PIL source says is 'experimental' and no documentation.
            # Also, should bytes swap depending on endian of file and system??
            mode = "I;16"
        else:
            raise TypeError("Don't know PIL mode for %d BitsAllocated and %d SamplesPerPixel" % (bits, samples))
        size = (dataset.Columns, dataset.Rows)
        # Recommended to specify all details by http://www.pythonware.com/library/pil/handbook/image.htm
        im = PIL.Image.frombuffer(mode, size, dataset.PixelData, "raw", mode, 0, 1)
    else:
        image = self.get_LUT_value(dataset.pixel_array, dataset.WindowWidth, dataset.WindowCenter)
        # Convert mode to L since LUT has only 256 values: http://www.pythonware.com/library/pil/handbook/image.htm
        im = PIL.Image.fromarray(image).convert('L')
    return im
def image_from_array(img_array, format='png'):
    """Creates an image object from a given numpy array.

    Parameters
    ----------
    img_array : numpy.ndarray
        The image data, which can have 1 or 3 color channels.

    Returns
    -------
    IPython.display.Image
        An image object for plots.
    """
    factor = 1
    if utils.image.is_float_image(img_array):
        factor = 255
    img_data = np.uint8(img_array * factor)
    f = StringIO()
    img_data = utils.image.to_rgb(img_data)
    arr = PIL.Image.fromarray(img_data)
    arr.save(f, format)
    return Image(data=f.getvalue())
def load_image(filename):
    image = None
    if wand is not None:
        try:
            image = wand.image.Image(filename=filename)
        except:
            image = None
        return image
    elif PIL is not None:
        try:
            image = PIL.Image.open(filename)
        except:
            image = None
            print(filename + " is not a valid img!")
        return image
    else:
        sys.stderr.write('You must have wand or Pillow/PIL installed to use the dhash command\n')
        sys.exit(1)
def arrangePics(targetDir, picDir):
    targetDirPathLen = len(targetDir)
    count = 0
    for path, d, filelist in os.walk(targetDir):
        if (not path.endswith('.git')) and isPathInList(targetLimitedSubDirs, path):
            for filename in filelist:
                if (filename.endswith('jpg') or filename.endswith('png')
                        or filename.endswith('jpeg') or filename.endswith('gif')):
                    fileNameWithPath = os.path.join(path, filename)
                    image = load_image(fileNameWithPath)
                    if image is not None:
                        ratio = format(float(image.height) / float(image.width), '.2f')
                        tempPath = os.path.join(picDir, str(ratio))
                        if not os.path.exists(tempPath):
                            os.mkdir(tempPath)
                        filenameNew = fileNameWithPath[targetDirPathLen+1:].replace('/', "_", 50)
                        tempFileNameWithPath = os.path.join(tempPath, filenameNew)
                        print(fileNameWithPath + " copy to " + tempFileNameWithPath)
                        count = count + 1
                        shutil.copy(fileNameWithPath, tempFileNameWithPath)
    print("Total Image count is " + str(count))
def similarity(self, imgA, imgB):
    """Given two images of the same size, this function computes the similarity
    of the pixel values. It computes the differences of the RGB values of each
    pixel and weights them with the alpha value.

    :param imgA: Image to be compared.
    :param imgB: Image to be compared.
    :returns: Similarity of the two images.
    """
    # print(imgA)
    # print(imgB)
    delta_R = imgA[:, :, 0] - imgB[:, :, 0]
    delta_G = imgA[:, :, 1] - imgB[:, :, 1]
    delta_B = imgA[:, :, 2] - imgB[:, :, 2]
    delta = (np.absolute(delta_R) + np.absolute(delta_G) + np.absolute(delta_B)) / 3
    return 1 - np.mean(delta)
def pytest_runtest_setup(item):
    if isinstance(item, item.Function):
        try:
            from PIL import Image
        except ImportError:
            Image = False

        if item.get_marker("pil_required") and Image is False:
            pytest.skip("PIL must be installed")
        elif item.get_marker("pil_not_installed") and Image:
            pytest.skip("PIL is installed")
        elif item.get_marker("not_py33"):
            pytest.skip("Ordering is not a given in Python 3")
        elif item.get_marker("lxml_required"):
            from openpyxl import LXML
            if not LXML:
                pytest.skip("LXML is required for some features such as schema validation")
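The hook above drives test skipping from custom markers (note that item.get_marker was removed in newer pytest releases in favour of item.get_closest_marker). A hedged sketch of a test that opts into one of these markers:

import pytest

@pytest.mark.pil_required            # custom marker consumed by pytest_runtest_setup above
def test_thumbnail_roundtrip():
    from PIL import Image
    img = Image.new("RGB", (64, 64), (255, 0, 0))
    img.thumbnail((16, 16))          # in-place downscale, preserving aspect ratio
    assert img.size == (16, 16)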
def onDownloadComplete(self, url, data):
    self.requested = False
    if not data:
        # print('Request Failed: {}'.format(self.result.item.name))
        return
    if self.image:
        return

    # CONVERT DATA TO GIF IMAGE
    try:
        img = PIL.Image.open(data)
        self.image = PIL.ImageTk.PhotoImage(img)
        self.updateOverlayImage(img)
        if url not in self.CACHE:
            self.CACHE[url] = data
        # notify ui
        self.ui_queue.put((MsgType.Object, self))
    except OSError as e:
        item = self.item
        with open('tmp\\{}.err.png'.format(item.name.strip()), mode='wb') as f:
            f.write(data.getvalue())
        logger.error('Image conversion failed: {}, Length: {}\t{}'.format(item.name, len(data.getvalue()), url))
def pil_image_to_pixbuf(self, image_fn, angle):
    """Use the Python Imaging Library (PIL) to load an image, rotate it,
    and return it as a pixbuf.
    """
    pixbuf = None
    if os.path.isfile(image_fn):
        pil_image = PIL.Image.open(image_fn)
        if angle != 0:
            pil_image = pil_image.rotate(angle, PIL.Image.BICUBIC, 1)
        fd = StringIO.StringIO()
        pil_image.save(fd, "png")
        contents = fd.getvalue()
        fd.close()
        loader = gtk.gdk.PixbufLoader("png")
        loader.write(contents, len(contents))
        pixbuf = loader.get_pixbuf()
        loader.close()
    # done
    return pixbuf
def __call__(self, img):
    """
    Args:
        img (PIL.Image): Image to be scaled.

    Returns:
        PIL.Image: Rescaled image.
    """
    if isinstance(self.size, int):
        w, h = img.size
        if (w <= h and w == self.size) or (h <= w and h == self.size):
            return img
        if w < h:
            ow = self.size
            oh = int(self.size * h / w)
            return img.resize((ow, oh), self.interpolation)
        else:
            oh = self.size
            ow = int(self.size * w / h)
            return img.resize((ow, oh), self.interpolation)
    else:
        return img.resize(self.size, self.interpolation)
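The method above belongs to a torchvision-style scale transform; its __init__ (which stores self.size and self.interpolation) is not shown. A standalone sketch of the same shorter-side resize logic, assuming an integer size means "target length of the shorter edge":

import PIL.Image

def scale_shorter_side(img, size, interpolation=PIL.Image.BILINEAR):
    """Resize so the shorter edge equals `size`, preserving the aspect ratio."""
    w, h = img.size
    if (w <= h and w == size) or (h <= w and h == size):
        return img
    if w < h:
        return img.resize((size, int(size * h / w)), interpolation)
    return img.resize((int(size * w / h), size), interpolation)

# e.g. thumb = scale_shorter_side(PIL.Image.open('photo.jpg'), 256)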
def Run(self, img_path, guide_image_path='', objective=0):
    """Run deep dream"""
    self.guide_path = guide_image_path
    if self.guide_path != '':
        self.Get_guide()
    self.net.blobs.keys()

    if img_path != '':
        frame = PIL.Image.open(img_path)
        frame = imresize(frame)
        frame = np.float32(frame)
    else:
        frame = self.GenerateInputImage()

    frame_i = 0
    h, w = frame.shape[:2]
    # s = 0.05  # scale coefficient
    for i in xrange(self.epoch):
        start = time.time()
        frame = self.Deepdream(frame)
        PIL.Image.fromarray(np.uint8(frame)).save("frames/%04d.jpg" % frame_i)
        # frame = nd.affine_transform(frame, [1-s,1-s,1], [h*s/2,w*s/2,0], order=1)
        frame_i += 1
        stop = time.time()
        print "Time cost for {:d}th image: {:.3f} s".format(i, stop - start)
def run_inference_on_image(image):
    """Runs inference on an image.

    Args:
        image: Image file name.

    Returns:
        Nothing
    """
    # image_data = tf.gfile.FastGFile(image, 'rb').read()
    image_data = image

    # Creates graph from saved GraphDef.
    # create_graph()

    with tf.Session() as sess:
        # Some useful tensors:
        # 'softmax:0': A tensor containing the normalized prediction across 1000 labels.
        # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
        #             float description of the image.
        # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
        #             encoding of the image.
        # Runs the softmax tensor by feeding the image_data as input to the graph.
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data.tostring()})
        predictions = np.squeeze(predictions)
        sess.close()

        # Creates node ID --> English string lookup.
        node_lookup = NodeLookup()

        top_k = predictions.argsort()[1:][::-1]
        human_string = node_lookup.id_to_string(top_k[0])
        score = predictions[top_k[0]]
        return {
            'autocategory': human_string,
            'autocategoryconfidence': str(score)
        }
        # print('%s (score = %.5f)' % (human_string, score))
def check_space(text, data):
    if data.size < len(text):
        print '[*] Image not big enough'
        sys.exit(0)
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))

# ANIMAL model (default)
# Here you select the model
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))

# Here you select the model
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))

# ANIMAL
# PLEASE MAKE SURE TO SELECT THE RIGHT MODEL FOR THE KEYS!!!
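The showarray variants above target Python 2 (they use StringIO). A rough Python 3 equivalent, assuming an IPython/Jupyter session where display() is wanted, swaps in io.BytesIO:

import io
import numpy as np
import PIL.Image
from IPython.display import Image, display

def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))        # clamp and convert to bytes
    f = io.BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)     # encode in-memory
    display(Image(data=f.getvalue()))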
def test_pil_version(self):
    import sys
    import PIL.Image
    major, minor, sub = PIL.Image.VERSION.split('.')[:3]
    rmajor, rminor, rsub = 1, 1, 5  # 2008/03/20
    major, minor, sub = int(major), int(minor), int(sub)
    print >> sys.stderr, "%d.%d.%d >= %d.%d.%d " % (major, minor, sub, rmajor, rminor, rsub),
    sys.stderr.flush()
    self.assert_(major > rmajor
                 or (major == rmajor and minor > rminor)
                 or (major == rmajor and minor == rminor and sub >= rsub))
def depart_header(self, node):
    start = self.context.pop()
    header = [self.starttag(node, 'div', CLASS='header')]
    header.extend(self.body[start:])
    header.append('\n<hr class="header"/>\n</div>\n')
    self.body_prefix.extend(header)
    self.header.extend(header)
    del self.body[start:]

# Image types to place in an <object> element
def _screencap(args):
    dev = ioskit.Device(args.udid)
    image = dev.screenshot()
    if args.rotate:
        method = getattr(Image, 'ROTATE_{}'.format(args.rotate))
        image = image.transpose(method)
    image.save(args.output)
    print 'Screenshot saved to "%s"' % args.output
def screenshot(self, filename=None):
    '''
    Take ios screenshot

    Args:
        - filename(string): optional

    Returns:
        PIL.Image object
    '''
    image = self.d.screenshot()
    if self.rotation:
        method = getattr(Image, 'ROTATE_{}'.format(self.rotation * 90))
        image = image.transpose(method)
    if filename:
        image.save(filename)
    return image
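Both screenshot helpers rotate via Image.transpose with the ROTATE_90/ROTATE_180/ROTATE_270 constants, which is lossless for quarter turns. A minimal sketch with a hypothetical input file:

from PIL import Image

img = Image.open('screenshot.png')     # hypothetical input
rotation = 1                           # number of quarter turns, as in screenshot() above
method = getattr(Image, 'ROTATE_{}'.format(rotation * 90))
img.transpose(method).save('screenshot_rotated.png')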
def showBGRimage(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    a[:, :, [0, 2]] = a[:, :, [2, 0]]  # for B,G,R order
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))
def showmap(a, fmt='png'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))

# def checkparam(param):
#     octave = param['octave']
#     starting_range = param['starting_range']
#     ending_range = param['ending_range']
#     assert starting_range <= ending_range, 'starting ratio should <= ending ratio'
#     assert octave >= 1, 'octave should >= 1'
#     return starting_range, ending_range, octave
def load_and_resize_image(path, height, width, mode='RGB'):
    """
    Returns an np.ndarray (height x width x channels)

    mode -- (RGB for color or L for grayscale)
    """
    image = PIL.Image.open(path)  # YY => RGB 8 bits, jpeg format, instance of Image class, not ndarray
    image = image.convert(mode)
    image = np.array(image)  # YY => ndarray, uint8 values bet 0 and 255, shape 240x320x3 (h x w x ch)
    if height > 0 and width > 0:
        # YY => ndarray, uint8 values bet 0 and 255, shape (h2 x w2 x ch)
        image = scipy.misc.imresize(image, (height, width), 'bilinear')
    return image
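Note that scipy.misc.imresize was removed in SciPy 1.3. A rough equivalent of this helper using only Pillow and NumPy (a sketch, not the original project's code) looks like:

import numpy as np
import PIL.Image

def load_and_resize_image_pil(path, height, width, mode='RGB'):
    image = PIL.Image.open(path).convert(mode)
    if height > 0 and width > 0:
        image = image.resize((width, height), PIL.Image.BILINEAR)  # PIL expects (width, height)
    return np.array(image)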
def __init__(self):
    super().__init__()
    # Import from Image only when in use
    from PIL import Image
    self.Image = Image
def save_array_to_tiff(array, output, verbose=True):
    '''Y.G. Nov 1, 2017
    Save an array to a tif file
    '''
    img = PIL.Image.fromarray(array)
    img.save(output)
    if verbose:
        print('The data is saved to: %s.' % output)
def load_pilatus(filename):
    '''Y.G. Nov 1, 2017
    Load a pilatus 2D image
    '''
    return np.array(PIL.Image.open(filename).convert('I'))
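A minimal round-trip sketch of the pattern the two helpers above implement, using a synthetic 8-bit array and a hypothetical file name:

import numpy as np
import PIL.Image

arr = (np.random.rand(64, 64) * 255).astype(np.uint8)
PIL.Image.fromarray(arr).save('frame.tif')                  # save_array_to_tiff pattern
back = np.array(PIL.Image.open('frame.tif').convert('I'))   # load_pilatus pattern (32-bit ints)
assert back.shape == (64, 64)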
def get_avg_img(data_series, img_samp_index=None, sampling=100, plot_=False, save=False, *argv, **kwargs):
    '''Get an average image from a data_series by every sampling number to save time'''
    if img_samp_index is None:
        avg_img = np.average(data_series[::sampling], axis=0)
    else:
        avg_img = np.zeros_like(data_series[0])
        n = 0
        for i in img_samp_index:
            avg_img += data_series[i]
            n += 1
        avg_img = np.array(avg_img) / n

    if plot_:
        fig, ax = plt.subplots()
        uid = 'uid'
        if 'uid' in kwargs.keys():
            uid = kwargs['uid']
        im = ax.imshow(avg_img, cmap='viridis', origin='lower',
                       norm=LogNorm(vmin=0.001, vmax=1e2))
        # ax.set_title("Masked Averaged Image")
        ax.set_title('uid= %s--Masked Averaged Image' % uid)
        fig.colorbar(im)
        if save:
            # dt = datetime.now()
            # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
            path = kwargs['path']
            if 'uid' in kwargs:
                uid = kwargs['uid']
            else:
                uid = 'uid'
            # fp = path + "uid= %s--Waterfall-" % uid + CurTime + '.png'
            fp = path + "uid=%s--avg-img-" % uid + '.png'
            fig.savefig(fp, dpi=fig.dpi)
        # plt.show()
    return avg_img
async def kitty(self):
    """The cure of boredom."""
    try:
        async with self.session.get(self.caturl) as r:
            result = await r.json()
        cat = discord.Embed(description="\u2063", color=discord.Color(0xffb6c1))
        cat.set_image(url=result['file'])
        # await self.bot.say(result['file'])
        await self.bot.say(embed=cat)
    except:
        await self.bot.say("Couldn't Get An Image")
async def fox(self):
    """Another cure of boredom."""
    try:
        async with self.session.get(self.foxurl) as r:
            result = await r.json()
        fox = discord.Embed(description="\u2063", color=discord.Color(0xffb6c1))
        fox.set_image(url=result['file'])
        # await self.bot.say(result['file'])
        await self.bot.say(embed=fox)
    except:
        await self.bot.say("Couldn't Get An Image")