Python keras.applications.imagenet_utils module: preprocess_input() example source code

The following 26 code examples, extracted from open-source Python projects, illustrate how to use keras.applications.imagenet_utils.preprocess_input().
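
In most of these projects, preprocess_input() receives a 4-D float array of RGB images and applies the ImageNet mean subtraction (with RGB-to-BGR channel reordering) expected by models such as VGG16 and ResNet50. A minimal usage sketch, assuming a Keras 2.x-era keras.applications.imagenet_utils; the image path and target size are placeholders:

import numpy as np
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input

# Load one image, build a batch of size 1, and preprocess it.
img = image.load_img('example.jpg', target_size=(224, 224))  # placeholder path and size
x = image.img_to_array(img)      # (224, 224, 3) float32 array
x = np.expand_dims(x, axis=0)    # add batch dimension -> (1, 224, 224, 3)
x = preprocess_input(x)          # ImageNet mean subtraction, RGB -> BGR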

Project: keras-squeezenet    Author: rcmalli    | Project source | File source
def testTHPrediction(self):
        keras.backend.set_image_dim_ordering('th')
        model = SqueezeNet()
        img = image.load_img('images/cat.jpeg', target_size=(227, 227))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        decoded_preds = decode_predictions(preds)
        #print('Predicted:', decoded_preds)
        self.assertIn(decoded_preds[0][0][1], 'tabby')
        #self.assertAlmostEqual(decode_predictions(preds)[0][0][2], 0.82134342)
Project: ml_idiot    Author: songjun54cm    | Project source | File source
def ext_img_feat(image_folder, batch_size):
    base_model = ResNet50(weights='imagenet')
    img_model = Model(input=base_model.input, output=base_model.get_layer('res5c').output)

    img_list = os.listdir(image_folder)
    all_img_feats = list()
    si = 0
    while si < len(img_list):
        batch_img = img_list[si:si+batch_size]
        si += batch_size
        imgs = []
        for imgf in batch_img:
            img_path = os.path.join(image_folder, imgf)
            img = image.load_img(img_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            imgs.append(x)
        imgs = np.concatenate(imgs, axis=0)
        img_feats = img_model.predict(imgs)
        all_img_feats.append(img_feats)
        print('%d images extracted\r' % si, end='')  # overwrite the same console line
Project: picasso    Author: merantix    | Project source | File source
def preprocess(self, raw_inputs):
        """
        Args:
            raw_inputs (list of Images): a list of PIL Image objects
        Returns:
            array (float32): num images * height * width * num channels
        """
        image_arrays = []
        for raw_im in raw_inputs:
            im = raw_im.resize(VGG16_DIM[:2], Image.ANTIALIAS)
            im = im.convert('RGB')
            arr = np.array(im).astype('float32')
            image_arrays.append(arr)

        all_raw_inputs = np.array(image_arrays)
        return imagenet_utils.preprocess_input(all_raw_inputs)
Project: deep-lossy-fun    Author: PetarV-    | Project source | File source
def load_and_process(img_path, target_size=None):
    # Feed in the image, convert to array
    img = load_img(img_path, target_size=target_size)
    img = img_to_array(img)

    # Add the batch dimension
    img = np.expand_dims(img, axis=0)

    # Perform the usual ImageNet preprocessing
    img = preprocess_input(img)

    return img
Project: keras-contrib    Author: farizrahman4u    | Project source | File source
def preprocess_input(x, data_format=None):
    """Preprocesses a tensor encoding a batch of images.

    # Arguments
        x: input Numpy tensor, 4D.
        data_format: data format of the image tensor.

    # Returns
        Preprocessed tensor.
    """
    x = _preprocess_input(x, data_format=data_format)
    x *= 0.017  # scale values
    return x
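
The keras-contrib variant above wraps the stock ImageNet preprocessing (here aliased as _preprocess_input) and then rescales by 0.017, roughly 1/58.8, which approximates dividing by the ImageNet per-channel standard deviation. A short sketch of how it might be exercised, assuming the alias points at keras.applications.imagenet_utils.preprocess_input and using a dummy batch:

import numpy as np
from keras.applications.imagenet_utils import preprocess_input as _preprocess_input

# Dummy batch of one 224x224 RGB image with pixel values in [0, 255].
batch = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')

x = _preprocess_input(batch, data_format=None)  # mean subtraction, RGB -> BGR
x *= 0.017                                      # scale values as in the snippet above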
Project: spark-deep-learning    Author: databricks    | Project source | File source
def _load_image_from_uri(local_uri):
    img = (PIL.Image
           .open(local_uri)
           .convert('RGB')
           .resize((299, 299), PIL.Image.ANTIALIAS))
    img_arr = np.array(img).astype(np.float32)
    img_tnsr = preprocess_input(img_arr[np.newaxis, :])
    return img_tnsr
Project: keras-squeezenet    Author: rcmalli    | Project source | File source
def testTFwPrediction(self):
        keras.backend.set_image_dim_ordering('tf')
        model = SqueezeNet()
        img = image.load_img('images/cat.jpeg', target_size=(227, 227))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        decoded_preds = decode_predictions(preds)
        #print('Predicted:', decoded_preds)
        self.assertIn(decoded_preds[0][0][1], 'tabby')
        #self.assertAlmostEqual(decode_predictions(preds)[0][0][2], 0.82134342)
Project: catrank    Author: jmhessel    | Project source | File source
def load_images(image_list):
    '''
    Given a list of images, returns a numpy tensor of those images.
    '''
    images = []
    for i in image_list:
        c_img = np.expand_dims(image.img_to_array(image.load_img(i, target_size = (224, 224))), axis = 0)
        images.append(c_img)
    return preprocess_input(np.vstack(images))
Project: Aesthetic_attributes_maps    Author: gautamMalu    | Project source | File source
def prepare_image(_image_src, target_size):
    '''
        Takes an image source as input and returns a
        processed image array ready for train/test/val
    :param _image_src: source of image
    :return: image_array
    '''
    img = image.load_img(_image_src, target_size=target_size)  # load_img expects the keyword target_size
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
Project: ssd_mlengine    Author: monochromegane    | Project source | File source
def generate(self, train=True):
        while True:
            if train:
                shuffle(self.train_keys)
                keys = self.train_keys
            else:
                shuffle(self.val_keys)
                keys = self.val_keys
            inputs = []
            targets = []
            for key in keys:
                img_path = self.path_prefix + key
                img = imread(img_path).astype('float32')
                y = self.gt[key].copy()
                if train and self.do_crop:
                    img, y = self.random_sized_crop(img, y)
                img = imresize(img, self.image_size).astype('float32')
                if train:
                    shuffle(self.color_jitter)
                    for jitter in self.color_jitter:
                        img = jitter(img)
                    if self.lighting_std:
                        img = self.lighting(img)
                    if self.hflip_prob > 0:
                        img, y = self.horizontal_flip(img, y)
                    if self.vflip_prob > 0:
                        img, y = self.vertical_flip(img, y)
                y = self.bbox_util.assign_boxes(y)
                inputs.append(img)
                targets.append(y)
                if len(targets) == self.batch_size:
                    tmp_inp = np.array(inputs)
                    tmp_targets = np.array(targets)
                    inputs = []
                    targets = []
                    yield preprocess_input(tmp_inp), tmp_targets
Project: kr_faster_rcnn    Author: romyny    | Project source | File source
def load_im_data(path, roi_entry, 
                 target_size=None, dim_ordering='default'):
    if pil_im is None:
        raise ImportError('Could not import PIL.Image. '
                          'The use of `array_to_img` requires PIL.')
    im = pil_im.open(path)    
    if roi_entry['flipped']:
        im = im.transpose(pil_im.FLIP_LEFT_RIGHT)

    im = im.convert('RGB')

    # subtract pixel means
    #im = preprocess_input(im, dim_ordering)

    gt_inds = np.where(roi_entry['gt_classes'] != 0)[0]
    gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)

    #resize im to match with network input
    if target_size is None:
        target_size = (224,224)

    w, h = im.size
    s_w = float(target_size[0]) / float(w)
    s_h = float(target_size[1]) / float(h)
    gt_boxes[:, 0] = roi_entry['boxes'][gt_inds, 0]*s_w
    gt_boxes[:, 2] = roi_entry['boxes'][gt_inds, 2]*s_w
    gt_boxes[:, 1] = roi_entry['boxes'][gt_inds, 1]*s_h
    gt_boxes[:, 3] = roi_entry['boxes'][gt_inds, 3]*s_h
    gt_boxes[:, 4] = roi_entry['gt_classes'][gt_inds]
    im = im.resize(size=target_size, resample=pil_im.BILINEAR)

    return im, gt_boxes
Project: EvadeML-Zoo    Author: mzweilin    | Project source | File source
def scaling_np(X, scaling=False):
    if scaling:
        X = X + 0.5
    X_uint8 = np.clip(np.rint(X*255), 0, 255)
    X_bgr = preprocess_input(X_uint8)
    return X_bgr
Project: Deconvnet-keras    Author: Jallet    | Project source | File source
def main():
    parser = argparser()
    args = parser.parse_args()
    image_path = args.image
    layer_name = args.layer_name
    feature_to_visualize = args.feature
    visualize_mode = args.mode

    model = vgg16.VGG16(weights = 'imagenet', include_top = True)
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    if layer_name not in layer_dict:
        print('Wrong layer name')
        sys.exit()

    # Load data and preprocess
    img = Image.open(image_path)
    img = img.resize((224, 224))  # PIL's resize expects a (width, height) tuple
    img_array = np.array(img)
    img_array = np.transpose(img_array, (2, 0, 1))
    img_array = img_array[np.newaxis, :]
    img_array = img_array.astype(np.float)
    img_array = imagenet_utils.preprocess_input(img_array)

    deconv = visualize(model, img_array, 
            layer_name, feature_to_visualize, visualize_mode)

    # postprocess and save image
    deconv = np.transpose(deconv, (1, 2, 0))
    deconv = deconv - deconv.min()
    deconv *= 1.0 / (deconv.max() + 1e-8)
    deconv = deconv[:, :, ::-1]
    uint8_deconv = (deconv * 255).astype(np.uint8)
    img = Image.fromarray(uint8_deconv, 'RGB')
    img.save('results/{}_{}_{}.png'.format(layer_name, feature_to_visualize, visualize_mode))
Project: unblackboxing_webinar    Author: deepsense-ai    | Project source | File source
def img2tensor(img):
    '''
    '''
    img = image.img_to_array(img)
    img = np.expand_dims(img,axis=0)
    img = preprocess_input(img)

    return img
Project: unblackboxing_webinar    Author: deepsense-ai    | Project source | File source
def img2tensor(img, img_shape):
    '''
        Transforms and preprocesses image for vgg model
        Inputs:
            img: numpy array, rgb image
        Outputs:
            tensor
    '''
    img = imresize(img, img_shape)
    img = image.img_to_array(img)    
    img = np.expand_dims(img,axis=0)
    img = preprocess_input(img)

    return img
Project: Papers2Code    Author: rainer85ah    | Project source | File source
def process(url='./url/2/image.jpg'):

    import cv2
    import numpy as np
    img = cv2.imread(url)
    img = cv2.resize(img, dsize=(224, 224))
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(np.asarray(img, dtype='float64'))
    return img  # img.shape (1, 224, 224, 3) type float64 ; BGR format by default from OpenCV
Project: Neural_Artistic_Style_Transfer    Author: giuseppebonaccorso    | Project source | File source
def pre_process_image(image):
        return preprocess_input(image)
Project: jamespy_py3    Author: jskDr    | Project source | File source
def add_channels(self):
        n_channels = self.n_channels

        if n_channels == 1:
            super().add_channels()
        else:
            X = self.X
            if X.ndim < 4:  # if X.ndim == 4, there is no need to add a channel axis.
                N, img_rows, img_cols = X.shape
                if K.image_dim_ordering() == 'th':
                    X = X.reshape(X.shape[0], 1, img_rows, img_cols)
                    X = np.concatenate([X, X, X], axis=1)
                    input_shape = (n_channels, img_rows, img_cols)
                else:
                    X = X.reshape(X.shape[0], img_rows, img_cols, 1)
                    X = np.concatenate([X, X, X], axis=3)
                    input_shape = (img_rows, img_cols, n_channels)
            else:
                if K.image_dim_ordering() == 'th':
                    N, Ch, img_rows, img_cols = X.shape
                    if Ch == 1:
                        X = np.concatenate([X, X, X], axis=1)
                    input_shape = (n_channels, img_rows, img_cols)
                else:
                    N, img_rows, img_cols, Ch = X.shape
                    if Ch == 1:
                        X = np.concatenate([X, X, X], axis=3)
                    input_shape = (img_rows, img_cols, n_channels)

            if self.preprocessing_flag:
                X = preprocess_input(X)
            self.X = X
            self.input_shape = input_shape
            # self.img_info = {'channels': n_channels,
            #                 'rows': img_rows, 'cols': img_cols}
Project: jamespy_py3    Author: jskDr    | Project source | File source
def get_features_pretrained(X, PretrainedModel=VGG19, preprocess_input=preprocess_input):
    """
    Get features from a pre-trained network.
    :param PretrainedModel: pre-trained model class, VGG19 by default
    :return: features
    """
    if preprocess_input is not None:
        X = preprocess_input(X)
    model = PretrainedModel(weights='imagenet', include_top=False, input_shape=X.shape[1:])
    features = model.predict(X)
    return features
Project: keras-transfer-learning-for-oxford102    Author: Arsey    | Project source | File source
def load_img(self, img_path):
        img = image.load_img(img_path, target_size=self.img_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        return preprocess_input(x)[0]
Project: deeputil    Author: Avkash    | Project source | File source
def preprocess_image_array(imgArray, show_info=True):
    """
    :param imgArray: 3-D color image array of shape (height, width, 3)
    :param show_info: whether to print a progress message
    :return: 4-D preprocessed array ready for classification
    """
    assert len(imgArray.shape) == 3

    if (imgArray.shape[2]) == 1:
        raise ValueError('Error: preprocessing is done for color images only and the input image is grayscale...')
    utils.helper_functions.show_print_message("Now pre-processing the image to get ready for classification..", show_info)
    img_array = np.expand_dims(imgArray, axis=0)
    img_array = preprocess_input(img_array)
    return img_array
Project: ActionRecognition    Author: woodfrog    | Project source | File source
def preprocess_single_frame(frame):
    frame = preprocess_input(frame)
    frame /= 255
    return frame
Project: caption_generator    Author: anuragmishracse    | Project source | File source
def load_image(path):
    img = image.load_img(path, target_size=(224,224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return np.asarray(x)
Project: Keras-FCN    Author: theduynguyen    | Project source | File source
def seg_data_generator(stride,n_classes,img_dir,label_dir,img_list,preprocess = True):
    while 1:
        LUT = np.eye(n_classes)

        for img_id in img_list:

            # load image
            img_path = img_dir + img_id
            x = skimage.io.imread(img_path)

            # load label
            label_path = label_dir + img_id[:-3] + 'png'
            y = skimage.io.imread(label_path) # interprets the image as a colour image

            # only yield if the images exist
            is_img = type(x) is np.ndarray and type(y) is np.ndarray
            not_empty = len(x.shape) > 0 and len(y.shape) > 0 

            if  is_img and not_empty:
                #deal with gray value images
                if len(x.shape) == 2:
                    x = skimage.color.gray2rgb(x)

                # only take one channel
                if len(y.shape) > 2:
                    y = y[...,0] 

                # treat binary images
                if np.max(y) == 255:
                    y = np.clip(y,0,1)

                # crop if image dims do not match stride
                w_rest = x.shape[0] % stride
                h_rest = x.shape[1] % stride

                if w_rest > 0:
                    w_crop_1 = int(np.round(w_rest / 2))  # int so it can be used as a slice index
                    w_crop_2 = w_rest - w_crop_1

                    x = x[w_crop_1:-w_crop_2,:,:]
                    y = y[w_crop_1:-w_crop_2,:]
                if h_rest > 0:
                    h_crop_1 = int(np.round(h_rest / 2))  # int so it can be used as a slice index
                    h_crop_2 = h_rest - h_crop_1

                    x = x[:,h_crop_1:-h_crop_2,:]
                    y = y[:,h_crop_1:-h_crop_2]

                # prepare for NN
                x = np.array(x,dtype='float')
                x = x[np.newaxis,...]

                if preprocess:
                    x = preprocess_input(x)

                y = LUT[y]
                y = y[np.newaxis,...] # make it a 4D tensor

                yield x, y
Project: games-cnn    Author: vanHavel    | Project source | File source
def classify_image(image_paths=['img.jpg'],
    model_path=os.path.join('model', 'model.mod'),
    cutoff_file='cutoffs.npy'):
    # load model
    model = load_model(model_path)

    # read genre file 
    genre_file_path = os.path.join('training_data', 'genres.txt')
    with open(genre_file_path, 'r') as handler:
        genres = handler.readlines()

    # determine preprocess method
    preprocess_path = os.path.join('training_data', 'preprocess.txt')
    with open(preprocess_path, 'r') as preprocess_file:
        dictionary = ast.literal_eval(preprocess_file.read())
        preprocess_method = dictionary['preprocess']
    if preprocess_method == 'xception':
        preprocess = preprocess_xception
    elif preprocess_method == 'vgg':
        preprocess = imagenet_utils.preprocess_input
    elif preprocess_method == 'none':
        preprocess = lambda x:x

    # preprocess images
    input_shape = model.layers[0].input_shape
    dimension = (input_shape[1], input_shape[2])
    screenshots = [process_screen(image_path, dimension, preprocess) for image_path in image_paths]

    # load cutoffs
    cutoffs = np.load(os.path.join('cutoffs', cutoff_file))

    # predict classes
    predictions = model.predict(np.array(screenshots))
    for prediction in predictions:
        print(prediction)
        classes = [i for i in range(0, len(prediction)) if prediction[i] >= cutoffs[i]]
        print('Predicted genres:')
        for c in classes:
            print(genres[c][:-1])
        print('True genres:')

# preprocess a single screen
Project: Keras-FCN    Author: aurora95    | Project source | File source
def inference(model_name, weight_file, image_size, image_list, data_dir, label_dir, return_results=True, save_dir=None,
              label_suffix='.png',
              data_suffix='.jpg'):
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # mean_value = np.array([104.00699, 116.66877, 122.67892])
    batch_shape = (1, ) + image_size + (3, )
    save_path = os.path.join(current_dir, 'Models/'+model_name)
    model_path = os.path.join(save_path, "model.json")
    checkpoint_path = os.path.join(save_path, weight_file)
    # model_path = os.path.join(current_dir, 'model_weights/fcn_atrous/model_change.hdf5')
    # model = FCN_Resnet50_32s((480,480,3))

    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    session = tf.Session(config=config)
    K.set_session(session)

    model = globals()[model_name](batch_shape=batch_shape, input_shape=(512, 512, 3))
    model.load_weights(checkpoint_path, by_name=True)

    model.summary()

    results = []
    total = 0
    for img_num in image_list:
        img_num = img_num.strip('\n')
        total += 1
        print('#%d: %s' % (total,img_num))
        image = Image.open('%s/%s%s' % (data_dir, img_num, data_suffix))
        image = img_to_array(image)  # , data_format='default')

        label = Image.open('%s/%s%s' % (label_dir, img_num, label_suffix))
        label_size = label.size

        img_h, img_w = image.shape[0:2]

        # long_side = max(img_h, img_w, image_size[0], image_size[1])
        pad_w = max(image_size[1] - img_w, 0)
        pad_h = max(image_size[0] - img_h, 0)
        image = np.lib.pad(image, ((pad_h//2, pad_h - pad_h//2), (pad_w//2, pad_w - pad_w//2), (0, 0)), 'constant', constant_values=0.)
        # image -= mean_value
        '''img = array_to_img(image, 'channels_last', scale=False)
        img.show()
        exit()'''
        # image = cv2.resize(image, image_size)

        image = np.expand_dims(image, axis=0)
        image = preprocess_input(image)

        result = model.predict(image, batch_size=1)
        result = np.argmax(np.squeeze(result), axis=-1).astype(np.uint8)

        result_img = Image.fromarray(result, mode='P')
        result_img.palette = label.palette
        # result_img = result_img.resize(label_size, resample=Image.BILINEAR)
        result_img = result_img.crop((pad_w//2, pad_h//2, pad_w//2+img_w, pad_h//2+img_h))
        # result_img.show(title='result')
        if return_results:
            results.append(result_img)
        if save_dir:
            result_img.save(os.path.join(save_dir, img_num + '.png'))
    return results