Keras concatenate matching-shapes error when the image shape is smaller

I am trying to follow a tutorial to recreate the pix2pix model. It works fine when I use images of shape (256,256,3), but it fails when I use images of shape (64,64,3). Here is the code

# example of pix2pix gan for satellite to map image-to-image translation
from numpy import load
from numpy import zeros
from numpy import ones
from numpy.random import randint
from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model
from keras.models import Input
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import Dropout
from keras.layers import BatchNormalization
from matplotlib import pyplot

# define the discriminator model
def define_discriminator(image_shape):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # source image input
    in_src_image = Input(shape=image_shape)
    # target image input
    in_target_image = Input(shape=image_shape)
    # concatenate images channel-wise
    merged = Concatenate()([in_src_image,in_target_image])
    # C64
    d = Conv2D(64,(4,4),strides=(2,2),padding='same',kernel_initializer=init)(merged)
    d = LeakyReLU(alpha=0.2)(d)
    # C128
    d = Conv2D(128,(4,4),strides=(2,2),padding='same',kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # C256
    d = Conv2D(256,(4,4),strides=(2,2),padding='same',kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # C512
    d = Conv2D(512,(4,4),strides=(2,2),padding='same',kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # second last output layer
    d = Conv2D(512,(4,4),padding='same',kernel_initializer=init)(d)
    d = BatchNormalization()(d)
    d = LeakyReLU(alpha=0.2)(d)
    # patch output
    d = Conv2D(1,(4,4),padding='same',kernel_initializer=init)(d)
    patch_out = Activation('sigmoid')(d)
    # define model
    model = Model([in_src_image,in_target_image],patch_out)
    # compile model
    opt = Adam(lr=0.0002,beta_1=0.5)
    model.compile(loss='binary_crossentropy',optimizer=opt,loss_weights=[0.5])
    return model
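# the four stride-2 C64..C512 blocks reduce a 256x256 input to a 16x16 patch map;
# train() below reads this patch size from d_model.output_shape[1] to shape the labels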

# define an encoder block
def define_encoder_block(layer_in,n_filters,batchnorm=True):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # add downsampling layer
    g = Conv2D(n_filters,(4,4),strides=(2,2),padding='same',kernel_initializer=init)(layer_in)
    # conditionally add batch normalization
    if batchnorm:
        g = BatchNormalization()(g,training=True)
    # leaky relu activation
    g = LeakyReLU(alpha=0.2)(g)
    return g

# define a decoder block
def decoder_block(layer_in,skip_in,n_filters,dropout=True):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # add upsampling layer
    g = Conv2DTranspose(n_filters,(4,4),strides=(2,2),padding='same',kernel_initializer=init)(layer_in)
    # add batch normalization
    g = BatchNormalization()(g,training=True)
    # conditionally add dropout
    if dropout:
        g = Dropout(0.5)(g,training=True)
    # merge with skip connection
    g = Concatenate()([g,skip_in])
    # relu activation
    g = Activation('relu')(g)
    return g

# define the standalone generator model
def define_generator(image_shape=(256,256,3)):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # image input
    in_image = Input(shape=image_shape)
    # encoder model
    e1 = define_encoder_block(in_image,64,batchnorm=False)
    e2 = define_encoder_block(e1,128)
    e3 = define_encoder_block(e2,256)
    e4 = define_encoder_block(e3,512)
    e5 = define_encoder_block(e4,512)
    e6 = define_encoder_block(e5,512)
    e7 = define_encoder_block(e6,512)
    # bottleneck,no batch norm and relu
    b = Conv2D(512,(4,4),strides=(2,2),padding='same',kernel_initializer=init)(e7)
    b = Activation('relu')(b)
    # decoder model
    d1 = decoder_block(b,e7,512)
    d2 = decoder_block(d1,e6,512)
    d3 = decoder_block(d2,e5,512)
    d4 = decoder_block(d3,e4,512,dropout=False)
    d5 = decoder_block(d4,e3,256,dropout=False)
    d6 = decoder_block(d5,e2,128,dropout=False)
    d7 = decoder_block(d6,e1,64,dropout=False)
    # output
    g = Conv2DTranspose(3,(4,4),strides=(2,2),padding='same',kernel_initializer=init)(d7)
    out_image = Activation('tanh')(g)
    # define model
    model = Model(in_image,out_image)
    return model
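# the encoder plus bottleneck halve the input eight times
# (256->128->64->32->16->8->4->2->1) and each decoder block doubles it back while
# concatenating the matching encoder skip, so the input size must survive eight
# exact halvings; a 64x64 input already collapses to 1x1 after six encoder blocks,
# so the skip shapes stop lining up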

# define the combined generator and discriminator model,for updating the generator
def define_gan(g_model,d_model,image_shape):
    # make weights in the discriminator not trainable
    d_model.trainable = False
    # define the source image
    in_src = Input(shape=image_shape)
    # connect the source image to the generator input
    gen_out = g_model(in_src)
    # connect the source input and generator output to the discriminator input
    dis_out = d_model([in_src,gen_out])
    # src image as input,generated image and classification output
    model = Model(in_src,[dis_out,gen_out])
    # compile model
    opt = Adam(lr=0.0002,beta_1=0.5)
    model.compile(loss=['binary_crossentropy','mae'],optimizer=opt,loss_weights=[1,100])
    return model

# load and prepare training images
def load_real_samples(filename):
    # load compressed arrays
    data = load(filename)
    # unpack arrays
    X1,X2 = data['arr_0'],data['arr_1']
    # scale from [0,255] to [-1,1]
    X1 = (X1 - 127.5) / 127.5
    X2 = (X2 - 127.5) / 127.5
    return [X1,X2]

# select a batch of random samples,returns images and target
def generate_real_samples(dataset,n_samples,patch_shape):
    # unpack dataset
    trainA,trainB = dataset
    # choose random instances
    ix = randint(0,trainA.shape[0],n_samples)
    # retrieve selected images
    X1,X2 = trainA[ix],trainB[ix]
    # generate 'real' class labels (1)
    y = ones((n_samples,patch_shape,patch_shape,1))
    return [X1,X2],y

# generate a batch of images,returns images and targets
def generate_fake_samples(g_model,samples,patch_shape):
    # generate fake instance
    X = g_model.predict(samples)
    # create 'fake' class labels (0)
    y = zeros((len(X),patch_shape,patch_shape,1))
    return X,y

# generate samples and save as a plot and save the model
def summarize_performance(step,g_model,dataset,n_samples=3):
    # select a sample of input images
    [X_realA,X_realB],_ = generate_real_samples(dataset,n_samples,1)
    # generate a batch of fake samples
    X_fakeB,_ = generate_fake_samples(g_model,X_realA,1)
    # scale all pixels from [-1,1] to [0,1]
    X_realA = (X_realA + 1) / 2.0
    X_realB = (X_realB + 1) / 2.0
    X_fakeB = (X_fakeB + 1) / 2.0
    # plot real source images
    for i in range(n_samples):
        pyplot.subplot(3,n_samples,1 + i)
        pyplot.axis('off')
        pyplot.imshow(X_realA[i])
    # plot generated target image
    for i in range(n_samples):
        pyplot.subplot(3,n_samples,1 + n_samples + i)
        pyplot.axis('off')
        pyplot.imshow(X_fakeB[i])
    # plot real target image
    for i in range(n_samples):
        pyplot.subplot(3,n_samples,1 + n_samples*2 + i)
        pyplot.axis('off')
        pyplot.imshow(X_realB[i])
    # save plot to file
    filename1 = 'plot_%06d.png' % (step+1)
    pyplot.savefig(filename1)
    pyplot.close()
    # save the generator model
    filename2 = 'model_%06d.h5' % (step+1)
    g_model.save(filename2)
    print('>Saved: %s and %s' % (filename1,filename2))

# train pix2pix models
def train(d_model,g_model,gan_model,dataset,n_epochs=100,n_batch=1):
    # determine the output square shape of the discriminator
    n_patch = d_model.output_shape[1]
    # unpack dataset
    trainA,trainB = dataset
    # calculate the number of batches per training epoch
    bat_per_epo = int(len(trainA) / n_batch)
    # calculate the number of training iterations
    n_steps = bat_per_epo * n_epochs
    # manually enumerate epochs
    for i in range(n_steps):
        # select a batch of real samples
        [X_realA,X_realB],y_real = generate_real_samples(dataset,n_batch,n_patch)
        # generate a batch of fake samples
        X_fakeB,y_fake = generate_fake_samples(g_model,X_realA,n_patch)
        # update discriminator for real samples
        d_loss1 = d_model.train_on_batch([X_realA,X_realB],y_real)
        # update discriminator for generated samples
        d_loss2 = d_model.train_on_batch([X_realA,X_fakeB],y_fake)
        # update the generator
        g_loss,_,_ = gan_model.train_on_batch(X_realA,[y_real,X_realB])
        # summarize performance
        print('>%d,d1[%.3f] d2[%.3f] g[%.3f]' % (i+1,d_loss1,d_loss2,g_loss))
        # summarize model performance
        if (i+1) % (bat_per_epo * 10) == 0:
            summarize_performance(i,g_model,dataset)

# load image data
dataset = load_real_samples('maps_256.npz')
print('Loaded',dataset[0].shape,dataset[1].shape)
# define input shape based on the loaded dataset
image_shape = dataset[0].shape[1:]
# define the models
d_model = define_discriminator(image_shape)
g_model = define_generator(image_shape)
# define the composite model
gan_model = define_gan(g_model,d_model,image_shape)
# train model
train(d_model,g_model,gan_model,dataset)

The error is

ValueError: A `Concatenate` layer requires inputs with matching shapes except for the concat axis. Got inputs shapes: [(None,2,2,512),(None,1,1,512)]

Full error

ValueError                                Traceback (most recent call last)
<ipython-input-52-c2af6b6feb40> in <module>
    236 # define the models
    237 d_model = define_discriminator(image_shape)
--> 238 g_model = define_generator(image_shape)
    239 # define the composite model
    240 gan_model = define_gan(g_model,image_shape)

<ipython-input-52-c2af6b6feb40> in define_generator(image_shape)
    105         b = Activation('relu')(b)
    106         # decoder model
--> 107         d1 = decoder_block(b,e7,512)
    108         d2 = decoder_block(d1,e6,512)
    109         d3 = decoder_block(d2,e5,512)

<ipython-input-52-c2af6b6feb40> in decoder_block(layer_in,skip_in,n_filters,dropout)
     82                 g = Dropout(0.5)(g,training=True)
     83         # merge with skip connection
---> 84         g = Concatenate()([g,skip_in])
     85         # relu activation
     86         g = Activation('relu')(g)

/usr/local/Cellar/jupyterlab/1.2.4/libexec/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py in symbolic_fn_wrapper(*args,**kwargs)
     73         if _SYMBOLIC_SCOPE.value:
     74             with get_graph().as_default():
---> 75                 return func(*args,**kwargs)
     76         else:
     77             return func(*args,**kwargs)

/usr/local/Cellar/jupyterlab/1.2.4/libexec/lib/python3.7/site-packages/keras/engine/base_layer.py in __call__(self,inputs,**kwargs)
    461                                          'You can build it manually via: '
    462                                          '`layer.build(batch_input_shape)`')
--> 463                 self.build(unpack_singleton(input_shapes))
    464                 self.built = True
    465 

/usr/local/Cellar/jupyterlab/1.2.4/libexec/lib/python3.7/site-packages/keras/layers/merge.py in build(self,input_shape)
    360                              'inputs with matching shapes '
    361                              'except for the concat axis. '
--> 362                              'Got inputs shapes: %s' % (input_shape))
    363 
    364     def _merge_function(self,inputs):

ValueError: A `Concatenate` layer requires inputs with matching shapes except for the concat axis. Got inputs shapes: [(None,2,2,512),(None,1,1,512)]

The confusing part is that it works fine when image_shape is (256,256,3) or (512,512,3). What am I missing?
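For reference, the mismatch can be traced by hand: with padding='same', every stride-2 Conv2D outputs ceil(n/2) and every stride-2 Conv2DTranspose doubles its input. A 64x64 image already reaches 1x1 after six encoder blocks, so e7 and the bottleneck stop shrinking, and the first decoder block upsamples the bottleneck to 2x2 while its skip connection e7 is still 1x1, which is exactly the (None,2,2,512) vs (None,1,1,512) pair in the traceback. A minimal sketch in plain Python (the helper name unet_skip_check is made up here) that reproduces the arithmetic:

from math import ceil

def unet_skip_check(size,n_blocks=7):
    # encoder: each block is a stride-2 Conv2D with padding='same' -> ceil(n/2)
    enc = []
    s = size
    for _ in range(n_blocks):
        s = ceil(s / 2)
        enc.append(s)
    # the bottleneck is one more stride-2 conv
    s = ceil(enc[-1] / 2)
    # decoder: each block doubles the size with a stride-2 Conv2DTranspose,
    # then concatenates the matching encoder output
    for skip in reversed(enc):
        s = s * 2
        print('decoder %3d vs skip %3d -> %s' % (s,skip,'ok' if s == skip else 'MISMATCH'))

unet_skip_check(256)   # all seven skip connections line up
unet_skip_check(64)    # first comparison: decoder 2 vs skip 1 -> MISMATCH

Keras raises at the first mismatched Concatenate, which is why only one pair of shapes appears in the error. Any size that cannot be halved exactly eight times breaks the same way; 256 and 512 survive all eight halvings, 64 does not.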
