The following 26 code examples, extracted from open-source Python projects, illustrate how to use PIL.ImageOps.fit().
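For orientation before the extracted examples, here is a minimal sketch of the call itself; the file names and the 128x128 size are placeholders, not taken from any of the projects below. ImageOps.fit(image, size, method, bleed, centering) returns a copy of the image scaled and cropped to exactly the requested size, cropping around the given centering point.

from PIL import Image, ImageOps

# Hypothetical input/output paths used only for illustration.
im = Image.open('example.jpg')

# Scale and center-crop to exactly 128x128 pixels.
# Image.LANCZOS is the resampling filter; the deprecated Image.ANTIALIAS
# seen in several examples below is an older alias for the same filter.
thumb = ImageOps.fit(im, (128, 128), method=Image.LANCZOS, centering=(0.5, 0.5))
thumb.save('example_thumb.jpg', 'JPEG')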
def generate_thumbnail(img_path, height, width, dest):
    """Generates a thumbnail of an image into the dest directory.

    Args:
        img_path (string): Relative directory path containing image to thumbnail.
        height (int): Height in pixels of thumbnail image.
        width (int): Width in pixels of thumbnail image.
        dest (string): Relative directory path to output thumbnail.

    Returns:
        The relative path to the generated thumbnail.
    """
    # Generate the new filename of the thumbnailed image.
    filename_no_ext = splitext(basename(img_path))[0]
    filename = '{}-{}x{}.jpg'.format(filename_no_ext, width, height)
    output_path = join(dest, filename)

    # Generate the thumbnail.
    image = Image.open(img_path)

    # Scale the image down to the thumbnail size.
    image = ImageOps.fit(image, (width, height))
    image.save(output_path, 'JPEG')

    print('Generated thumbnail: {}'.format(output_path))
    return output_path
def generate_avatar(key, size):
    colors = generate_colors(key)

    b = int(size * 0.06)   # border width
    r1 = int(size * 0.24)
    r2 = r1 - b
    m = size / 2

    board = Image.new('RGB', (size, size), colors[1])
    draw = ImageDraw.Draw(board)
    draw.ellipse((b, b, size - b, size - b), fill=colors[0])
    draw.rectangle((0, m - b, size, m + b), fill=colors[1])
    draw.ellipse((m - r1, m - r1, m + r1, m + r1), fill=colors[1])
    draw.ellipse((m - r2, m - r2, m + r2, m + r2), fill=colors[2])

    mask = Image.new('L', (size, size), 0)
    draw = ImageDraw.Draw(mask)
    draw.ellipse((0, 0, size, size), fill=255)

    avatar = ImageOps.fit(board, (size, size))
    avatar.putalpha(mask)
    return avatar
def prepare_image(img, crop_width_perc=0, crop_height_perc=0, fit_image=True, grayscale=True):
    # convert to grayscale
    mode = 'L' if grayscale else 'RGB'
    result = img.convert(mode)

    # crop image
    image_size = result.size
    width_crop_size = int(image_size[0] * crop_width_perc / 2) if crop_width_perc > 0 else 0
    height_crop_size = int(image_size[1] * crop_height_perc / 2) if crop_height_perc > 0 else 0
    if width_crop_size or height_crop_size:
        result = result.crop(
            (
                width_crop_size,
                height_crop_size,
                image_size[0] - width_crop_size,
                image_size[1] - height_crop_size
            )
        )

    # resize to 512x512 pixels
    resize_option = Image.ANTIALIAS
    if fit_image:
        return ImageOps.fit(result, RESHAPE, resize_option)
    return result.resize(RESHAPE, resize_option)
def create_thumbnail(source, identifier, size='large'):
    _size = settings.THUMBNAIL_SIZES.get(size)
    if not _size:
        log.exception('Invalid thumbnail size provided')
        raise InvalidThumbnailSize
    try:
        im = Image.open(source)
        if _size.get('type') == 'crop':
            log.debug('Cropping {}'.format(source))
            imc = ImageOps.fit(im, _size.get('size'), Image.ANTIALIAS)
            imc.save(get_thumbnail_path(identifier, size), 'JPEG', quality=90)
        else:
            log.debug('Resizing {}'.format(source))
            im.thumbnail(_size.get('size'), Image.ANTIALIAS)
            im.save(get_thumbnail_path(identifier, size), 'JPEG', quality=90)
    except IOError:
        log.exception('Could not create thumbnail of {}'.format(source))
        raise CannotSaveThumbnail
def reshape_observation(self, observation):
    """
    Crop non-square inputs to squares positioned with crop_centering.
    Resize to resize_shape. Optionally convert to grayscale.
    """
    img = Image.fromarray(observation)
    img = ImageOps.fit(img, self.resize_shape, centering=self.crop_centering)
    if self.grayscale:
        img = img.convert('L')
    return np.array(img)
def __init__(self, cf, gps):
    self.cf = cf
    self.gps = gps

    if self.cf.head_file != "":
        # Initialize self.head and self.head_alpha
        size = (self.cf.head_size, self.cf.head_size)
        mask = Image.new('L', size, 0)
        draw = ImageDraw.Draw(mask)
        draw.ellipse((0, 0) + size, fill=255)
        im = Image.open(self.cf.head_file)
        self.head = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
        self.head.putalpha(mask)
        _, _, _, self.head_alpha = self.head.split()
    else:
        self.head = None

    self.photos = []
    if self.cf.photos_dir != "":
        if not os.path.exists(self.cf.photos_dir):
            raise Exception(self.cf.photos_dir + " does not exist")
        if os.path.isdir(self.cf.photos_dir):
            for photo in os.listdir(self.cf.photos_dir):
                self.photos_append(os.path.join(self.cf.photos_dir, photo))
            self.photos.sort()
        else:
            self.photos_append(self.cf.photos_dir)

    # Init camera
    if len(self.photos) > 0:
        self.camera = Image.open(os.path.join(self.cf.script_dir, "./camera.png")).convert("RGBA").resize((20, 20), Image.ANTIALIAS)
        _, _, _, self.camera_alpha = self.camera.split()
        self.cameras_xy = []
def _image_preprocessing(filename, xsize, ysize):
    im = Image.open(filename)
    if im.mode != 'RGB':
        print('Mode: ', im.mode)
        tmp = im.convert('RGB')
        im.close()
        im = tmp
    downsampled_im = ImageOps.fit(im, (xsize, ysize), method=Image.LANCZOS)
    norm_im = np.array(downsampled_im, dtype=np.float32)
    downsampled_im.close()
    im.close()
    return norm_im
def _save_scaled_cropped_img(src, dest):
    image = load_img(src)
    image = fit(image, IMGS_DIM_2D, method=LANCZOS)
    image.save(dest)
    return image
def variation_resize(image, image_size, variation):
    """ Resizes the source image according to the variation settings """
    source_format = image.format
    source_size = image.size

    # Variation options
    crop = bool(variation['crop'])
    stretch = bool(variation['stretch'])

    if crop:
        # Size is unchanged - return the image as is
        if image_size == source_size:
            return image

        image = ImageOps.fit(
            image,
            image_size,
            method=Image.ANTIALIAS,
            centering=variation['center']
        )
    else:
        if stretch:
            image = image.resize(image_size, resample=Image.ANTIALIAS)
        else:
            image.thumbnail(image_size, resample=Image.ANTIALIAS)

    image.format = source_format
    return image
def variation_overlay(image, overlay):
    """ Pastes an overlay image on top of the source image """
    overlay_img = Image.open(overlay)
    if overlay_img.size != image.size:
        overlay_img = ImageOps.fit(overlay_img, image.size, method=Image.ANTIALIAS)
    transparency_mask = get_transparency_mask(overlay_img, overlay_img.info)
    image.paste(overlay_img, mask=transparency_mask)
    return image
def variation_mask(image, mask):
    """ Cuts the image out along a mask """
    mask_img = Image.open(mask).convert('L')
    if mask_img.size != image.size:
        mask_img = ImageOps.fit(mask_img, image.size, method=Image.ANTIALIAS)
    transparency_mask = get_transparency_mask(mask_img, mask_img.info)
    background = Image.new('RGBA', image.size)
    background.paste(image, mask=transparency_mask)
    return background
def get_resized_image(img_path, height, width, save=True):
    image = Image.open(img_path)
    # PIL expects sizes as (width, height), so the arguments are swapped here
    image = ImageOps.fit(image, (width, height), Image.ANTIALIAS)
    if save:
        image_dirs = img_path.split('/')
        image_dirs[-1] = 'resized_' + image_dirs[-1]
        out_path = '/'.join(image_dirs)
        if not os.path.exists(out_path):
            image.save(out_path)
    image = np.asarray(image, np.float32)
    return np.expand_dims(image, 0)
async def cache(self, url, size=None):
    # already in cache?
    if url in self.image_cache:
        return self

    # download image
    image = self.image_cache[url] = await download_image(self.ctx.bot.session, url)

    # fit if a size was provided
    if size:
        self.image_cache[url] = ImageOps.fit(image, size, Image.BICUBIC, 0.0, (0.5, 0.5))

    return self
async def trustnobody(self, ctx, image_source: converters.Image):
    """Trust nobody, not even yourself."""
    async with ctx.typing():
        m = Meme('resources/memes/trust_nobody.png', ctx)
        im = (await download_image(ctx.bot.session, image_source))
        im = ImageOps.fit(im, (100, 100), Image.BICUBIC)
        m.paste(im, (82, 230))
        im = im.crop((0, 0, 62, 100))
        m.paste(im, (420, 250))
        im.close()
        await m.render('trust_nobody.png')
        m.cleanup()
def getInterestingPic():
    url = 'https://api.flickr.com/services/rest/?method=flickr.interestingness.getList&api_key=API_KEY&format=json&nojsoncallback=1'
    url = url.replace('API_KEY', API_KEY)
    print(url)

    response = urlopen(url).read().decode('utf-8')
    # print(response)
    data = json.loads(response)
    photo = random.choice(data['photos']['photo'])
    # print(photo)

    # https://farm{farm-id}.staticflickr.com/{server-id}/{id}_{secret}_z.jpg
    # Info: https://www.flickr.com/services/api/misc.urls.html
    fn = str(photo['id']) + '_' + str(photo['secret']) + '_c.jpg'
    pic_url = 'https://farm{farm-id}.staticflickr.com/{server-id}/'
    pic_url = pic_url.replace('{farm-id}', str(photo['farm']))
    pic_url = pic_url.replace('{server-id}', str(photo['server']))
    pic_url += fn

    urlretrieve(pic_url, 'temp/' + fn)

    # Convert to PNG
    size = 150, 150
    im = Image.open('temp/' + fn)
    im_thumb = ImageOps.fit(im, size, Image.ANTIALIAS)
    im_thumb.save('temp/' + fn.replace('.jpg', '.png'))

    print(pic_url)
    return './temp/' + fn.replace('.jpg', '.png')
def make_autocropping_field(cls, width, height, upload_to="images", processor="fit",
                            image_format="JPEG", matte_colour=(255, 255, 255, 255), **kwargs):
    """
    Creates an imagekit.models.fields.ProcessedImageField that automatically scales and crops images
    """
    assert processor in ("fit", "fill"), "invalid processor"
    if processor == "fill":
        processors = [ResizeToFill(width, height)]
    else:
        processors = [ResizeToFit(width, height, mat_color=matte_colour)]

    assert image_format in ("JPEG", "PNG"), "invalid image_format"
    if image_format == "PNG":
        image_options = {"optimize": True}
    else:
        image_options = {"quality": IMAGE_QUALITY}

    help_text = "Automatically scaled to %s×%s" % (width, height)
    help_text_additional = kwargs.pop("help_text", None)
    if help_text_additional:
        help_text = "%s. %s" % (help_text_additional, help_text)

    return ProcessedImageField(
        processors=processors,
        format=image_format,
        options=image_options,
        upload_to=upload_to,
        help_text=help_text,
        **kwargs
    )
def _crop_image(cls, origin_image, target_image, size, crop_box, image_format="JPEG"):
    """
    Resizes an image from one model field and saves into another

    :param origin_image: django.db.models.fields.files.ImageFieldFile
    :param target_image: django.db.models.fields.files.ImageFieldFile
    :param size: tuple of final desired width and height
    :param crop_box: str, 4-coordinate crop box
    :param image_format: str, Pillow Image format
    """
    # Original photo
    origin_image.seek(0)
    image_file = Image.open(origin_image)

    # Convert to RGB
    if image_file.mode not in ("L", "RGB"):
        image_file = image_file.convert("RGB")

    if crop_box:
        try:
            values = [int(x) for x in crop_box.split(",")]
            width = abs(values[2] - values[0])
            height = abs(values[3] - values[1])
            if width and height and (width != image_file.size[0] or height != image_file.size[1]):
                image_file = image_file.crop(values)
        except (ValueError, TypeError, IndexError):
            # There's garbage in the cropping field, ignore
            print("Unable to parse crop_box parameter value '%s'. Ignoring." % crop_box)

    image_file = ImageOps.fit(image_file, size, method=Image.LANCZOS)
    image_content = BytesIO()
    image_file.save(image_content, format=image_format, quality=IMAGE_QUALITY)
    image_content = ImageFile(image_content, origin_image.name)
    target_image.save(name=image_content.name, content=image_content, save=False)
def _image_preprocessing(filename, xsize, ysize):
    im = Image.open(filename)
    if filename.endswith('.png'):
        im = im.convert('RGB')
    downsampled_im = ImageOps.fit(im, (xsize, ysize), method=Image.LANCZOS)
    norm_im = np.array(downsampled_im, dtype=np.float32) / 255.
    downsampled_im.close()
    im.close()
    return norm_im
def fix_profile_picture(self):
    if self.image:
        if self.image.width > 300 or self.image.height > 300:
            ImageOps.fit(
                Image.open(self.image.path),
                (300, 300),
                Image.ANTIALIAS,
                centering=(0.5, 0.5)
            ).save(self.image.path, "PNG", quality=100)
def create_thumbnail(file_path):
    thumbnail_filename = utils.get_thumb_filename(file_path)
    thumbnail_format = utils.get_image_format(os.path.splitext(file_path)[1])

    image = default_storage.open(file_path)
    image = Image.open(image)
    file_format = image.format

    # Convert to RGB if necessary
    # Thanks to Limodou on DjangoSnippets.org
    # http://www.djangosnippets.org/snippets/20/
    if image.mode not in ('L', 'RGB'):
        image = image.convert('RGB')

    # scale and crop to thumbnail
    imagefit = ImageOps.fit(image, THUMBNAIL_SIZE, Image.ANTIALIAS)
    thumbnail_io = BytesIO()
    imagefit.save(thumbnail_io, format=file_format)

    thumbnail = InMemoryUploadedFile(
        thumbnail_io,
        None,
        thumbnail_filename,
        thumbnail_format,
        len(thumbnail_io.getvalue()),
        None)
    thumbnail.seek(0)

    return default_storage.save(thumbnail_filename, thumbnail)
def fit_model(training_sample):
    df = pd.read_csv(training_sample)

    # convert values to bool
    df['d'] = df['d'].astype('bool')

    l_model = GradientBoostingClassifier()
    l_model = l_model.fit(
        df[df.columns.difference(['d'])],
        df['d']
    )
    return l_model
def make_thumbnail(self):
    size = (300, 300)

    f = default_storage.open(self.img.name)
    image = Image.open(f)
    ftype = image.format

    image = ImageOps.fit(image, size, Image.ANTIALIAS)

    path, ext = os.path.splitext(self.img.name)
    name = os.path.basename(path)
    thumbnail_name = '%s_thumb%s' % (name, ext)

    temp_file = BytesIO()
    image.save(temp_file, ftype)
    temp_file.seek(0)

    content_file = ContentFile(temp_file.read())
    self.img_thumbnail.save(thumbnail_name, content_file)

    temp_file.close()
    content_file.close()
    f.close()
def generate_thumbnail(photo, width=256, height=256, aspect='cover', quality=75):
    global_state.increment('photo_thumbnailer_tasks_running')
    try:
        pf = photo.files.filter(mimetype='image/jpeg')[0]
        im = Image.open(pf.path)
        if im.mode != 'RGB':
            im = im.convert("RGB")

        metadata = PhotoMetadata(pf.path)
        if metadata.get('Orientation') in ['Rotate 90 CW', 'Rotate 270 CCW']:
            im = im.rotate(-90, expand=True)
        elif metadata.get('Orientation') in ['Rotate 90 CCW', 'Rotate 270 CW']:
            im = im.rotate(90, expand=True)

        if aspect == 'cover':
            im = ImageOps.fit(im, (width, height), Image.ANTIALIAS)
        else:
            im.thumbnail((width, height), Image.ANTIALIAS)

        directory = os.path.join(settings.THUMBNAIL_ROOT, '{}x{}_{}_q{}'.format(width, height, aspect, quality))
        if not os.path.exists(directory):
            os.makedirs(directory)
        path = os.path.join(directory, '{}.jpg'.format(photo.id))
        im.save(path, format='JPEG', quality=quality)

        photo.last_thumbnailed_version = 0
        photo.last_thumbnailed_at = timezone.now()
        photo.save()
    finally:
        global_state.decrement('photo_thumbnailer_tasks_running')
def make_thumbnail(self):
    import os
    from PIL import Image, ImageOps
    from io import BytesIO, StringIO, FileIO
    from django.core.files.base import ContentFile
    from django.core.files.storage import default_storage

    size = (300, 300)

    # Open the FileField from the default storage
    f = default_storage.open(self.img)
    print('f : %s' % f)

    # Image.open returns an Image instance
    image = Image.open(f)

    # Image.format holds the original format (JPEG, PNG, BMP, ...)
    ftype = image.format
    print('ftype : %s' % ftype)

    # Scale and crop to the thumbnail size with ImageOps.fit
    image = ImageOps.fit(image, size, Image.ANTIALIAS)

    # Build the thumbnail file name from the original img name
    path, ext = os.path.splitext(self.img.name)
    name = os.path.basename(path)
    # The result is <name>_thumb<ext>
    thumbnail_name = '%s_thumb%s' % (name, ext)

    # Save the thumbnail to an in-memory file
    temp_file = BytesIO()
    image.save(temp_file, ftype)
    temp_file.seek(0)

    # Save into the img_thumbnail field; Django's FileField
    # expects the content wrapped in a ContentFile
    content_file = ContentFile(temp_file.read())
    self.img_thumbnail.save(thumbnail_name, content_file)

    # Close the open file objects
    temp_file.close()
    content_file.close()
    f.close()

    return True
def make_thumbnail(self):
    import os
    from PIL import Image, ImageOps
    from io import BytesIO
    from django.core.files.base import ContentFile
    from django.core.files.storage import default_storage

    size = (300, 300)

    # Open the FileField from the default storage
    f = default_storage.open(self.img)
    print('f : %s' % f)

    # Image.open returns an Image instance
    image = Image.open(f)

    # Image.format holds the original format (JPEG, PNG, BMP, ...)
    ftype = image.format
    print('ftype : %s' % ftype)

    # Scale and crop to the thumbnail size with ImageOps.fit
    image = ImageOps.fit(image, size, Image.ANTIALIAS)

    # Build the thumbnail file name from the original img name
    path, ext = os.path.splitext(self.img.name)
    name = os.path.basename(path)
    # The result is <name>_thumb<ext>
    thumbnail_name = '%s_thumb%s' % (name, ext)

    # Save the thumbnail to an in-memory file
    temp_file = BytesIO()
    image.save(temp_file, ftype)
    temp_file.seek(0)

    # Save into the img_thumbnail field; Django's FileField
    # expects the content wrapped in a ContentFile
    content_file = ContentFile(temp_file.read())
    self.img_thumbnail.save(thumbnail_name, content_file)

    # Close the open file objects
    temp_file.close()
    content_file.close()
    f.close()

    return True
def run(inputString):
    """ Classify the input using the loaded model """
    start = t.default_timer()
    images = json.loads(inputString)
    result = []
    totalPreprocessTime = 0
    totalEvalTime = 0
    totalResultPrepTime = 0

    for base64ImgString in images:
        if base64ImgString.startswith('b\''):
            base64ImgString = base64ImgString[2:-1]
        base64Img = base64ImgString.encode('utf-8')

        # Preprocess the input data
        startPreprocess = t.default_timer()
        decoded_img = base64.b64decode(base64Img)
        img_buffer = BytesIO(decoded_img)

        # Load image with PIL (RGB)
        pil_img = Image.open(img_buffer).convert('RGB')
        pil_img = ImageOps.fit(pil_img, (224, 224), Image.ANTIALIAS)
        rgb_image = np.array(pil_img, dtype=np.float32)

        # Resnet trained with BGR
        bgr_image = rgb_image[..., [2, 1, 0]]
        imageData = np.ascontiguousarray(np.rollaxis(bgr_image, 2))
        endPreprocess = t.default_timer()
        totalPreprocessTime += endPreprocess - startPreprocess

        # Evaluate the model using the input data
        startEval = t.default_timer()
        imgPredictions = np.squeeze(trainedModel.eval({trainedModel.arguments[0]: [imageData]}))
        endEval = t.default_timer()
        totalEvalTime += endEval - startEval

        # Only return top 3 predictions
        startResultPrep = t.default_timer()
        resultIndices = (-np.array(imgPredictions)).argsort()[:topResult]
        imgTopPredictions = []
        for i in range(topResult):
            imgTopPredictions.append((labelLookup[resultIndices[i]], imgPredictions[resultIndices[i]] * 100))
        endResultPrep = t.default_timer()
        result.append(imgTopPredictions)
        totalResultPrepTime += endResultPrep - startResultPrep

    end = t.default_timer()

    logger.info("Predictions: {0}".format(result))
    logger.info("Predictions took {0} ms".format(round((end - start) * 1000, 2)))
    logger.info("Time distribution: preprocess={0} ms, eval={1} ms, resultPrep = {2} ms".format(
        round(totalPreprocessTime * 1000, 2), round(totalEvalTime * 1000, 2), round(totalResultPrepTime * 1000, 2)))

    actualWorkTime = round((totalPreprocessTime + totalEvalTime + totalResultPrepTime) * 1000, 2)
    return (result, 'Computed in {0} ms'.format(actualWorkTime))