Value for attr 'T' of float is not in the list of allowed values: int32, int64

# Imports implied by the code below (assuming Keras 2.x on a TensorFlow 1.x backend).
import numpy as np
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from keras import backend as K
from keras.layers import (Layer, InputSpec, Input, Conv2D, MaxPooling2D, Dropout,
                          Dense, Activation, Add, Multiply, Reshape, Concatenate,
                          GlobalAveragePooling2D, BatchNormalization)
from keras.models import Model
from keras.utils import conv_utils


class Bilinearupsampling(Layer):

    def __init__(self,upsampling=(2,2),output_size=None,data_format=None,**kwargs):

        super(Bilinearupsampling,self).__init__(**kwargs)

        self.data_format = K.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)
        if output_size:
            self.output_size = conv_utils.normalize_tuple(
                output_size,2,'output_size')
            self.upsampling = None
        else:
            self.output_size = None
            self.upsampling = conv_utils.normalize_tuple(
                upsampling, 2, 'upsampling')

    def compute_output_shape(self,input_shape):
        if self.upsampling:
            height = self.upsampling[0] * \
                input_shape[1] if input_shape[1] is not None else None
            width = self.upsampling[1] * \
                input_shape[2] if input_shape[2] is not None else None
        else:
            height = self.output_size[0]
            width = self.output_size[1]
        return (input_shape[0],height,width,input_shape[3])

    def call(self, inputs):
        if self.upsampling:
            return tf.image.resize_bilinear(
                inputs,
                (int(inputs.shape[1] * self.upsampling[0]),
                 int(inputs.shape[2] * self.upsampling[1])))  # align_corners=True
        else:
            return tf.image.resize_bilinear(
                inputs,
                (self.output_size[0], self.output_size[1]))  # align_corners=True

    def get_config(self):
        config = {'upsampling': self.upsampling,'output_size': self.output_size,'data_format': self.data_format}
        base_config = super(Bilinearupsampling,self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
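
For context, a minimal smoke test of this layer (a sketch, assuming Keras 2.x on a TensorFlow 1.x backend where tf.image.resize_bilinear still exists; under TF 2.x it would be tf.compat.v1.image.resize_bilinear or tf.image.resize):

# Hypothetical check: upsampling a 64x64x3 input by (2, 2) should give (None, 128, 128, 3).
inp = Input(shape=(64, 64, 3))
out = Bilinearupsampling(upsampling=(2, 2))(inp)
print(Model(inp, out).output_shape)  # expected: (None, 128, 128, 3)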

class BatchNorm(BatchNormalization):
    def call(self,inputs,training=None):
        return super(self.__class__, self).call(inputs, training=True)

def BN(input_tensor, block_id):
    bn = BatchNorm(name=block_id + '_BN')(input_tensor)
    a = Activation('relu', name=block_id + '_relu')(bn)
    return a

def l1_reg(weight_matrix):
    return K.mean(weight_matrix)

class Repeat(Layer):
    def __init__(self,repeat_list,**kwargs):
        super(Repeat,self).__init__(**kwargs)
        self.repeat_list = repeat_list

    def call(self,inputs):
        outputs = tf.tile(inputs,self.repeat_list)
        return outputs
    def get_config(self):
        config = {
            'repeat_list': self.repeat_list
        }
        base_config = super(Repeat,self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self,input_shape):
        output_shape = [None]
        for i in range(1,len(input_shape)):
            output_shape.append(input_shape[i]*self.repeat_list[i])
        return tuple(output_shape)
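
As a side note, Repeat is a thin wrapper over tf.tile; a quick sketch of the intended 4-D usage (run eagerly or inside a session, depending on the TF version):

# Hypothetical example: repeat_list=[1, 1, 1, 2] doubles the channel axis,
# matching compute_output_shape above.
x = tf.zeros((1, 2, 2, 3))
y = tf.tile(x, [1, 1, 1, 2])
print(y.shape)  # (1, 2, 2, 6)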

def SpatialAttention(inputs, name):
    k = 9
    H, W, C = map(int, inputs.get_shape()[1:])
    attention1 = Conv2D(C // 2, (1, k), padding='same', name=name + '_1_conv1')(inputs)
    attention1 = BN(attention1, 'attention1_1')
    attention1 = Conv2D(1, (k, 1), padding='same', name=name + '_1_conv2')(attention1)
    attention1 = BN(attention1, 'attention1_2')
    attention2 = Conv2D(C // 2, (k, 1), padding='same', name=name + '_2_conv1')(inputs)
    attention2 = BN(attention2, 'attention2_1')
    attention2 = Conv2D(1, (1, k), padding='same', name=name + '_2_conv2')(attention2)
    attention2 = BN(attention2, 'attention2_2')
    attention = Add(name=name + '_add')([attention1, attention2])
    attention = Activation('sigmoid')(attention)
    attention = Repeat(repeat_list=[1, 1, 1, C])(attention)
    return attention

def ChannelWiseAttention(inputs, name):
    H, W, C = map(int, inputs.get_shape()[1:])
    attention = GlobalAveragePooling2D(name=name + '_GlobalAveragePooling2D')(inputs)
    attention = Dense(int(C / 4), activation='relu')(attention)
    attention = Dense(C, activation='sigmoid', activity_regularizer=l1_reg)(attention)
    attention = Reshape((1, 1, C), name=name + '_reshape')(attention)
    attention = Repeat(repeat_list=[1, H, W, 1], name=name + '_repeat')(attention)
    attention = Multiply(name=name + '_multiply')([attention, inputs])
    return attention


class Copy(Layer):
    def call(self, inputs, **kwargs):
        copy = tf.identity(inputs)
        return copy

    def compute_output_shape(self, input_shape):
        return input_shape

class layertile(Layer):
    def call(self, inputs, **kwargs):
        image = tf.reduce_mean(inputs, axis=-1)
        image = tf.expand_dims(image, -1)
        image = tf.tile(image, [1, 1, 1, 32])
        return image

    def compute_output_shape(self,input_shape):
        output_shape = list(input_shape)[:-1] + [32]
        return tuple(output_shape)



def AtrousBlock(input_tensor, filters, rate, block_id, stride=1):
    x = Conv2D(filters, (3, 3), strides=(stride, stride), dilation_rate=(rate, rate),
               padding='same', use_bias=False, name=block_id + '_dilation')(input_tensor)
    return x


def CFE(input_tensor, filters, block_id):
    rate = [3, 5, 7]
    cfe0 = Conv2D(filters, (1, 1), padding='same', use_bias=False,
                  name=block_id + '_cfe0')(input_tensor)
    cfe1 = AtrousBlock(input_tensor, filters, rate[0], block_id + '_cfe1')
    cfe2 = AtrousBlock(input_tensor, filters, rate[1], block_id + '_cfe2')
    cfe3 = AtrousBlock(input_tensor, filters, rate[2], block_id + '_cfe3')
    cfe_concat = Concatenate(name=block_id + 'concatcfe', axis=-1)([cfe0, cfe1, cfe2, cfe3])
    cfe_concat = BN(cfe_concat, block_id)
    return cfe_concat


def VGG16(img_input, dropout=False, with_CPFE=False, with_CA=False, with_SA=False, droup_rate=0.3):
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    C1 = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    if dropout:
        x = Dropout(droup_rate)(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    C2 = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    if dropout:
        x = Dropout(droup_rate)(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    C3 = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    if dropout:
        x = Dropout(droup_rate)(x)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    C4 = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    if dropout:
        x = Dropout(droup_rate)(x)
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    if dropout:
        x = Dropout(droup_rate)(x)
    C5 = x

    C1 = Conv2D(64, (3, 3), padding='same', name='C1_conv')(C1)
    C1 = BN(C1, 'C1_BN')
    C2 = Conv2D(64, (3, 3), padding='same', name='C2_conv')(C2)
    C2 = BN(C2, 'C2_BN')
    if with_CPFE:
        C3_cfe = CFE(C3, 32, 'C3_cfe')
        C4_cfe = CFE(C4, 32, 'C4_cfe')
        C5_cfe = CFE(C5, 32, 'C5_cfe')
        C5_cfe = Bilinearupsampling(upsampling=(4, 4), name='C5_cfe_up4')(C5_cfe)
        C4_cfe = Bilinearupsampling(upsampling=(2, 2), name='C4_cfe_up2')(C4_cfe)
        C345 = Concatenate(name='C345_aspp_concat', axis=-1)([C3_cfe, C4_cfe, C5_cfe])
        if with_CA:
            C345 = ChannelWiseAttention(C345, name='C345_ChannelWiseAttention_withcpfe')
    C345 = Conv2D(64, (1, 1), padding='same', name='C345_conv')(C345)
    C345 = BN(C345, 'C345')
    C345 = Bilinearupsampling(upsampling=(4, 4), name='C345_up4')(C345)

    if with_SA:
        SA = SpatialAttention(C345, 'spatial_attention')
        C2 = Bilinearupsampling(upsampling=(2, 2), name='C2_up2')(C2)
        C12 = Concatenate(name='C12_concat', axis=-1)([C1, C2])
        C12 = Conv2D(64, (3, 3), padding='same', name='C12_conv')(C12)
        C12 = BN(C12, 'C12')
        C12 = Multiply(name='C12_atten_mutiply')([SA, C12])
    fea = Concatenate(name='fuse_concat', axis=-1)([C12, C345])
    sa = Conv2D(1, (3, 3), padding='same', name='sa')(fea)

    model = Model(inputs=img_input, outputs=sa, name="BaseModel")
    return model

def padding(x):
    h,w,c = x.shape
    size = max(h,w)
    paddingh = (size-h)//2
    paddingw = (size-w)//2
    temp_x = np.zeros((size,size,c))
    temp_x[paddingh:h+paddingh,paddingw:w+paddingw,:] = x
    return temp_x

def load_image(path):
    x = cv2.imread(path)
    sh = x.shape
    x = np.array(x, dtype=np.float32)  # float32 so the mean subtraction below works
    x = x[...,::-1]
    # Zero-center by mean pixel
    x[...,0] -= 103.939
    x[...,1] -= 116.779
    x[...,2] -= 123.68
    x = padding(x)
    x = cv2.resize(x,target_size,interpolation=cv2.INTER_LINEAR)
    x = np.expand_dims(x,0)
    return x,sh

def cut(pridict,shape):
    h, w, c = shape
    size = max(h,w)
    pridict = cv2.resize(pridict,(size,size))
    paddingh = (size - h) // 2
    paddingw = (size - w) // 2
    return pridict[paddingh:h + paddingh,paddingw:w + paddingw]

def sigmoid(x):
    return 1/(1 + np.exp(-x))

def getres(pridict,shape):
    pridict = sigmoid(pridict)*255
    pridict = np.array(pridict,dtype=np.uint8)
    pridict = np.squeeze(pridict)
    pridict = cut(pridict,shape)
    return pridict

def laplace_edge(x):
    laplace = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
    edge = cv2.filter2D(x / 255., -1, laplace)
    edge = np.maximum(np.tanh(edge),0)
    edge = edge * 255
    edge = np.array(edge,dtype=np.uint8)
    return edge

model_name = 'x.h5'

target_size = (256,256)

dropout = False
with_CPFE = True
with_CA = True
with_SA = True

if target_size[0] % 32 != 0 or target_size[1] % 32 != 0:
    raise ValueError('Image height and width must be a multiple of 32')

model_input = Input(shape=(target_size[0],target_size[1],3))
model = VGG16(model_input,dropout=dropout,with_CPFE=with_CPFE,with_CA=with_CA,with_SA=with_SA)
model.load_weights(model_name,by_name=True)

for layer in model.layers:
    layer.trainable = False

image_path = 'x.jpg'
img,shape = load_image(image_path)
img = np.array(img, dtype=np.float32)
sa = model.predict(img)
sa = getres(sa,shape)
plt.subplot(131)
plt.imshow(cv2.imread(image_path)[..., ::-1])  # BGR -> RGB for display
plt.subplot(132)
plt.title('saliency')
plt.imshow(sa, cmap='gray')
plt.subplot(133)
edge = laplace_edge(sa)
plt.imshow(edge,cmap='gray')
plt.show()

Error: Value for attr 'T' of float is not in the list of allowed values: int32, int64; NodeDef: {{node RandomUniform}}; op output: dtype; attr = seed:int, default = 0; attr = seed2:int, default = 0; attr = dtype:type, allowed = [DT_HALF, DT_BFLOAT16, DT_FLOAT, DT_DOUBLE]; attr = T:type, allowed = [DT_INT32, DT_INT64]; is_stateful = true> [Op:RandomUniform]

I have already changed C to int... I really don't know what else to try...
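
One plausible cause, going by the stack trace: under Python 3, C / 2 and C / 4 are floats, so Conv2D/Dense receive a float unit count, the kernel shape handed to the glorot_uniform initializer contains a float, and the underlying RandomUniform op rejects it (its shape attr 'T' only accepts int32/int64). A minimal sketch of the pattern and the fix, assuming that is indeed the trigger:

from keras.layers import Input, Conv2D

C = 96
inp = Input(shape=(32, 32, C))
# Likely fails with the RandomUniform error above: C / 2 == 48.0 is a float filter count.
# bad = Conv2D(C / 2, (1, 9), padding='same')(inp)
# Works: integer division keeps the filter count an int.
good = Conv2D(C // 2, (1, 9), padding='same')(inp)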

hushilan325 answered: Value for attr 'T' of float is not in the list of allowed values: int32, int64

No good solution for now; if you have one, please email: iooj@foxmail.com
