The following 24 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.set_image_dim_ordering().
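Before the examples, a minimal sketch of the call itself (assuming a Keras version whose backend still exposes these legacy functions): set_image_dim_ordering() switches the global image dimension ordering between Theano-style 'th' (channels first) and TensorFlow-style 'tf' (channels last), and image_dim_ordering() reads the current setting back. Many of the examples below follow the same save/set/restore pattern.

from keras import backend as K

# Remember the current ordering so it can be restored afterwards.
previous_ordering = K.image_dim_ordering()

# 'th' = channels first (N, C, H, W); 'tf' = channels last (N, H, W, C).
K.set_image_dim_ordering('tf')
assert K.image_dim_ordering() == 'tf'

# Restore the original setting.
K.set_image_dim_ordering(previous_ordering)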
def _test_model_compile(model):
    for ordering in DIM_ORDERING:
        K.set_image_dim_ordering(ordering)
        model.compile(loss="categorical_crossentropy", optimizer="sgd")
        assert True, "Failed to compile with '{}' dim ordering".format(ordering)
def __init__(self, *args, **kwargs):
    from keras.layers.core import Dense, Flatten
    from keras.layers.convolutional import Convolution2D
    from keras import backend as K

    if K.backend() == 'theano':
        K.set_image_dim_ordering('tf')

    self.Dense = Dense
    self.Flatten = Flatten
    self.Convolution2D = Convolution2D
    self.kernel = 4
    self.stride = (2, 2)
    super(ConvDQN, self).__init__(*args, **kwargs)
def test_grams_th(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('th')

    input = np.zeros((1, 3, 4, 4))
    iter = 0
    for i in range(input.shape[1]):
        for j in range(input.shape[2]):
            for k in range(input.shape[3]):
                input[0][i][j][k] = iter
                iter += 1
    input = input.astype(K.floatx())

    true_grams = np.array([[
        [1240, 3160, 5080],
        [3160, 9176, 15192],
        [5080, 15192, 25304]
    ]]).astype(K.floatx())
    true_grams /= input.shape[1] * input.shape[2] * input.shape[3]

    x = K.placeholder(input.shape, name='x')
    gram_mat = grams(x)
    get_grams = K.function([x], [gram_mat])
    K.set_image_dim_ordering(previous_image_dim_ordering)

    pred_grams = get_grams([input])[0]
    self.assertEqual(True, (pred_grams == true_grams).all())
def test_grams_tf(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    input = np.zeros((1, 3, 4, 4))
    iter = 0
    for i in range(input.shape[1]):
        for j in range(input.shape[2]):
            for k in range(input.shape[3]):
                input[0][i][j][k] = iter
                iter += 1
    input = input.astype(K.floatx())
    input = np.transpose(input, (0, 2, 3, 1))

    true_grams = np.array([[
        [1240, 3160, 5080],
        [3160, 9176, 15192],
        [5080, 15192, 25304]
    ]]).astype(K.floatx())
    true_grams /= input.shape[1] * input.shape[2] * input.shape[3]

    x = K.placeholder(input.shape, name='x')
    gram_mat = grams(x)
    get_grams = K.function([x], [gram_mat])
    K.set_image_dim_ordering(previous_image_dim_ordering)

    pred_grams = get_grams([input])[0]
    self.assertEqual(True, (pred_grams == true_grams).all())
def test_load_mean_tf(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    mean = load_mean()
    real_mean = np.array([[[[103.939, 116.779, 123.68]]]])
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(True, (mean == real_mean).all())
def test_preprocess_tf_vgg19(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    blue_im = misc.imread(dir + '/../fixture/blue.png')
    red_im = np.array(misc.imread(dir + '/../fixture/red.png').astype(K.floatx()))
    red_im = (red_im - load_mean()[0]).astype('uint8')

    new_red_im = preprocess(blue_im, type='vgg19').astype('uint8')
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(True, (red_im == new_red_im).all())
def test_preprocess_tf_none(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    blue_im = misc.imread(dir + '/../fixture/blue.png')
    new_blue_im = preprocess(blue_im).astype('uint8')
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(True, (blue_im == new_blue_im).all())
def test_preprocess_tf_st(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    blue_im = misc.imread(dir + '/../fixture/blue.png')
    red_im = np.array(misc.imread(dir + '/../fixture/red.png').astype(K.floatx()))

    new_red_im = preprocess(blue_im, type='st').astype('uint8')
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(True, (red_im == new_red_im).all())
def test_load_image(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    blue_im = load_image(dir + '/../fixture/blue.png')
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(blue_im.shape, (600, 600, 3))
def test_load_image_th(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('th')

    blue_im = load_image(dir + '/../fixture/blue.png')
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(blue_im.shape, (3, 600, 600))
def test_load_images_limit(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    file = load_images(dir + '/../fixture', 1)
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(file.shape, (1, 600, 600, 3))
def test_deprocess(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    blue_im = misc.imread(dir + '/../fixture/blue.png')
    im = preprocess(blue_im)
    im = deprocess(im)
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(True, (blue_im == im).all())
def test_deprocess_th(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('th')

    blue_im = misc.imread(dir + '/../fixture/blue.png')
    im = preprocess(blue_im)
    im = deprocess(im)
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(True, (blue_im == im).all())
def test_create_noise_tensor(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    file = create_noise_tensor(4, 5, 3)
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(file.shape, (1, 4, 5, 3))
def test_resize(self):
    previous_image_dim_ordering = K.image_dim_ordering()
    K.set_image_dim_ordering('tf')

    ims = load_images(dir + '/../fixture')
    ims = resize(ims, (150, 150))
    K.set_image_dim_ordering(previous_image_dim_ordering)

    self.assertEqual(ims.shape, (4, 150, 150, 3))
def test_convolution_transpose_tf(self):
    if K._BACKEND != 'tensorflow':
        return True
    K.set_image_dim_ordering('tf')

    border_mode = 'valid'
    batch = 1
    height = 10
    width = 10
    channels_in = 1
    channels_out = 2
    kernel_size = 3  # effective kernel size: kernel_size + (kernel_size - 1) * (rate - 1)
    rate = 2
    input_shape = (height, width, channels_in)

    input = Input(shape=input_shape, dtype=K.floatx())
    conv_layer = ATrousConvolution2D(channels_out, kernel_size, kernel_size, rate,
                                     dim_ordering=K.image_dim_ordering(),
                                     init='one', border_mode=border_mode, activation='linear')
    output = conv_layer(input)
    model = Model(input=[input], output=[output])
    model.compile(loss='mean_squared_error', optimizer='sgd')

    x = np.ones((batch,) + input_shape).astype(K.floatx())
    kernel = conv_layer.W
    output_model = model.predict(x)

    if K._BACKEND == 'tensorflow':
        y = tf.nn.atrous_conv2d(x, kernel, rate, padding=border_mode.upper())
        output = y.eval(session=K.get_session())

    self.assertEqual(output_model.shape, (1, 6, 6, 2))
    self.assertEqual(output.shape, (1, 6, 6, 2))
    self.assertEqual(True, (output == output_model).all())
def test_convolution_transpose_tf_sameborder(self):
    if K._BACKEND != 'tensorflow':
        return True
    K.set_image_dim_ordering('tf')

    border_mode = 'same'
    batch = 1
    height = 10
    width = 10
    channels_in = 1
    channels_out = 2
    kernel_size = 3  # effective kernel size: kernel_size + (kernel_size - 1) * (rate - 1)
    rate = 2
    input_shape = (height, width, channels_in)

    input = Input(shape=input_shape, dtype=K.floatx())
    conv_layer = ATrousConvolution2D(channels_out, kernel_size, kernel_size, rate,
                                     dim_ordering=K.image_dim_ordering(),
                                     init='one', border_mode=border_mode, activation='linear')
    output = conv_layer(input)
    model = Model(input=[input], output=[output])
    model.compile(loss='mean_squared_error', optimizer='sgd')

    x = np.ones((batch,) + input_shape).astype(K.floatx())
    kernel = conv_layer.W
    output_model = model.predict(x)

    if K._BACKEND == 'tensorflow':
        y = tf.nn.atrous_conv2d(x, kernel, rate, padding=border_mode.upper())
        output = y.eval(session=K.get_session())

    self.assertEqual(output_model.shape, (1, 10, 10, 2))
    self.assertEqual(output.shape, (1, 10, 10, 2))
    self.assertEqual(True, (output == output_model).all())
def set_img_format():
    try:
        if K.backend() == 'theano':
            K.set_image_data_format('channels_first')
        else:
            K.set_image_data_format('channels_last')
    except AttributeError:
        if K._BACKEND == 'theano':
            K.set_image_dim_ordering('th')
        else:
            K.set_image_dim_ordering('tf')
def __init__(self, input_size, nb_channels=3, conditional=False, latent_dim=10,
             nb_pixelcnn_layers=13, nb_filters=128, filter_size_1st=(7,7), filter_size=(3,3),
             optimizer='adadelta', es_patience=100, save_root='/tmp/pixelcnn',
             save_best_only=False, **kwargs):
    '''
    Args:
        input_size ((int,int))      : (height, width) pixels of input images
        nb_channels (int)           : Number of channels for input images. (1 for grayscale images, 3 for color images)
        conditional (bool)          : if True, use latent vector to model the conditional distribution p(x|h) (default: False)
        latent_dim (int)            : (if conditional==True) Dimensions for latent vector.
        nb_pixelcnn_layers (int)    : Number of layers (except last two ReLU layers). (default: 13)
        nb_filters (int)            : Number of filters (feature maps) for each layer. (default: 128)
        filter_size_1st ((int, int)): Kernel size for the first layer. (default: (7,7))
        filter_size ((int, int))    : Kernel size for the subsequent layers. (default: (3,3))
        optimizer (str)             : SGD optimizer (default: 'adadelta')
        es_patience (int)           : Number of epochs with no improvement after which training will be stopped (EarlyStopping)
        save_root (str)             : Root directory to which {trained model file, parameter.txt, tensorboard log file} are saved
        save_best_only (bool)       : if True, the latest best model will not be overwritten (default: False)
    '''
    K.set_image_dim_ordering('tf')

    self.input_size = input_size
    self.conditional = conditional
    self.latent_dim = latent_dim
    self.nb_pixelcnn_layers = nb_pixelcnn_layers
    self.nb_filters = nb_filters
    self.filter_size_1st = filter_size_1st
    self.filter_size = filter_size
    self.nb_channels = nb_channels
    if self.nb_channels == 1:
        self.loss = 'binary_crossentropy'
    elif self.nb_channels == 3:
        self.loss = 'categorical_crossentropy'
    self.optimizer = optimizer
    self.es_patience = es_patience
    self.save_best_only = save_best_only

    tensorboard_dir = os.path.join(save_root, 'pixelcnn-tensorboard')
    checkpoint_path = os.path.join(save_root, 'pixelcnn-weights.{epoch:02d}-{val_loss:.4f}.hdf5')
    self.tensorboard = TensorBoard(log_dir=tensorboard_dir)
    ### "save_weights_only=False" causes error when exporting model architecture. (json or yaml)
    self.checkpointer = ModelCheckpoint(filepath=checkpoint_path, verbose=1,
                                        save_weights_only=True, save_best_only=save_best_only)
    self.earlystopping = EarlyStopping(monitor='val_loss', patience=es_patience, verbose=0, mode='auto')
def test_convolution_transpose_th(self):
    border_mode = 'valid'
    K.set_image_dim_ordering('th')

    batch = 1
    height = 2
    width = 2
    channels_in = 1
    channels_out = 2
    kernel_size = 3
    strides = (1, 1)
    input_shape = (channels_in, height, width)

    input = Input(shape=input_shape, dtype=K.floatx())
    conv_layer = ConvolutionTranspose2D(channels_out, kernel_size, kernel_size,
                                        dim_ordering=K.image_dim_ordering(),
                                        init='one', subsample=strides,
                                        border_mode=border_mode, activation='linear')
    output = conv_layer(input)
    model = Model(input=[input], output=[output])
    model.compile(loss='mean_squared_error', optimizer='sgd')

    x = np.ones((batch,) + input_shape).astype(K.floatx())
    kernel = conv_layer.W
    output_model = model.predict(x)

    if K._BACKEND == 'theano':
        output_shape = conv_layer.get_output_shape_for(K.shape(x))
        y = T.nnet.abstract_conv.conv2d_grad_wrt_inputs(theano.shared(x), kernel, output_shape,
                                                        filter_shape=None, border_mode=border_mode,
                                                        subsample=strides, filter_flip=True)
        output = y.eval()
    else:
        sess = K.get_session()
        output_shape = conv_layer.get_output_shape_for(K.shape(x))
        output_shape = tf.pack([1, output_shape[2], output_shape[3], output_shape[1]])
        x = tf.transpose(x, (0, 2, 3, 1))
        kernel = tf.transpose(kernel, (2, 3, 1, 0))
        y = tf.nn.conv2d_transpose(x, kernel, output_shape, (1,) + strides + (1,),
                                   padding=border_mode.upper())
        y = tf.transpose(y, (0, 3, 1, 2))
        output = sess.run(y)

    self.assertEqual(output_model.shape, (1, 2, 4, 4))
    self.assertEqual(output.shape, (1, 2, 4, 4))
    self.assertEqual(True, (output == output_model).all())
    # model.fit(x, x + 1, nb_epoch=1)
def test_convolution_transpose_tf(self):
    border_mode = 'valid'
    K.set_image_dim_ordering('tf')

    batch = 1
    height = 2
    width = 2
    channels_in = 1
    channels_out = 2
    kernel_size = 3
    strides = (1, 1)
    input_shape = (height, width, channels_in)

    input = Input(shape=input_shape, dtype=K.floatx())
    conv_layer = ConvolutionTranspose2D(channels_out, kernel_size, kernel_size,
                                        dim_ordering=K.image_dim_ordering(),
                                        init='one', subsample=strides,
                                        border_mode=border_mode, activation='linear')
    output = conv_layer(input)
    model = Model(input=[input], output=[output])
    model.compile(loss='mean_squared_error', optimizer='sgd')

    x = np.ones((batch,) + input_shape).astype(K.floatx())
    kernel = conv_layer.W
    output_model = model.predict(x)

    if K._BACKEND == 'theano':
        output_shape = conv_layer.get_output_shape_for(K.shape(x))
        output_shape = (1, output_shape[3], output_shape[1], output_shape[2])
        x = np.transpose(x, (0, 3, 1, 2))
        kernel = T.transpose(kernel, (3, 2, 1, 0))
        y = T.nnet.abstract_conv.conv2d_grad_wrt_inputs(theano.shared(x), kernel, output_shape,
                                                        filter_shape=None, border_mode=border_mode,
                                                        subsample=strides, filter_flip=True)
        y = T.transpose(y, (0, 2, 3, 1))
        output = y.eval()
    else:
        sess = K.get_session()
        output_shape = conv_layer.get_output_shape_for(K.shape(x))
        output_shape = tf.pack([1, output_shape[1], output_shape[2], output_shape[3]])
        y = tf.nn.conv2d_transpose(x, kernel, output_shape, (1,) + strides + (1,),
                                   padding=border_mode.upper())
        output = sess.run(y)

    self.assertEqual(output_model.shape, (1, 4, 4, 2))
    self.assertEqual(output.shape, (1, 4, 4, 2))
    self.assertEqual(True, (output == output_model).all())
    # model.fit(x, x + 1, nb_epoch=1)
def TinyYOLO(input_shape=(3,416,416), num_classes=80, num_priors=5):
    """Tiny YOLO (v2) architecture

    # Arguments
        input_shape: Shape of the input image
        num_classes: Number of classes (excluding background)

    # References
        https://arxiv.org/abs/1612.08242
        https://arxiv.org/abs/1506.02640
    """
    K.set_image_dim_ordering('th')

    net = {}
    input_tensor = Input(shape=input_shape)
    net['input'] = input_tensor
    net['conv1'] = YOLOConvolution2D(16, 3, 3, border_mode='same', subsample=(1,1),
                                     epsilon=0.000001)(net['input'])
    net['relu1'] = LeakyReLU(alpha=0.1)(net['conv1'])
    net['pool1'] = MaxPooling2D(pool_size=(2, 2), border_mode='valid')(net['relu1'])
    net['conv2'] = YOLOConvolution2D(32, 3, 3, border_mode='same', subsample=(1,1),
                                     epsilon=0.000001)(net['pool1'])
    net['relu2'] = LeakyReLU(alpha=0.1)(net['conv2'])
    net['pool2'] = MaxPooling2D(pool_size=(2, 2), border_mode='valid')(net['relu2'])
    net['conv3'] = YOLOConvolution2D(64, 3, 3, border_mode='same', subsample=(1,1),
                                     epsilon=0.000001)(net['pool2'])
    net['relu3'] = LeakyReLU(alpha=0.1)(net['conv3'])
    net['pool3'] = MaxPooling2D(pool_size=(2, 2), border_mode='valid')(net['relu3'])
    net['conv4'] = YOLOConvolution2D(128, 3, 3, border_mode='same', subsample=(1,1),
                                     epsilon=0.000001)(net['pool3'])
    net['relu4'] = LeakyReLU(alpha=0.1)(net['conv4'])
    net['pool4'] = MaxPooling2D(pool_size=(2, 2), border_mode='valid')(net['relu4'])
    net['conv5'] = YOLOConvolution2D(256, 3, 3, border_mode='same', subsample=(1,1),
                                     epsilon=0.000001)(net['pool4'])
    net['relu5'] = LeakyReLU(alpha=0.1)(net['conv5'])
    net['pool5'] = MaxPooling2D(pool_size=(2, 2), border_mode='valid')(net['relu5'])
    net['conv6'] = YOLOConvolution2D(512, 3, 3, border_mode='same', subsample=(1,1),
                                     epsilon=0.000001)(net['pool5'])
    net['relu6'] = LeakyReLU(alpha=0.1)(net['conv6'])
    net['pool6'] = MaxPooling2D(pool_size=(2, 2), strides=(1,1), border_mode='same')(net['relu6'])
    net['conv7'] = YOLOConvolution2D(1024, 3, 3, border_mode='same', subsample=(1,1),
                                     epsilon=0.000001)(net['pool6'])
    net['relu7'] = LeakyReLU(alpha=0.1)(net['conv7'])
    net['conv8'] = YOLOConvolution2D(1024, 3, 3, border_mode='same', subsample=(1,1),
                                     epsilon=0.000001)(net['relu7'])
    net['relu8'] = LeakyReLU(alpha=0.1)(net['conv8'])
    net['conv9'] = Convolution2D(num_priors*(4+num_classes+1), 1, 1, border_mode='same',
                                 subsample=(1,1))(net['relu8'])

    model = Model(net['input'], net['conv9'])
    return model
def vgg_16(weight_path=None):
    """Build the VGG16 model.

    # Arguments
        weight_path: path of the pre-trained VGG16 weights.
            If None, weights will be initialized by default.

    # Output shape
        The VGG16 model
    """
    K.set_image_dim_ordering('th')  # Note that the pre-trained weights we download are based on Theano, not TensorFlow

    model = Sequential()
    model.add(ZeroPadding2D((1,1), input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    model.summary()

    if weight_path:
        model.load_weights(weight_path)

    return model