I want to concatenate layers (a positional embedding with a token embedding) for use in a neural network with an attention mechanism (a Transformer), but it does not work.
The code I actually want is in the comments of the call method. With addition there is no problem: an input x of shape (None, 15, 16) broadcasts against the positional encoding of shape (15, 16) in x + positions. But tf.concat([x, positions], axis=...) really is a problem. Can anyone help?
import tensorflow as tf
from tensorflow.keras import layers

class concatenatedTokenAndPositionalEmbedding(layers.Layer):
    def __init__(self, maxlen, vocab_size, embed_dim, **kwargs):
        super(concatenatedTokenAndPositionalEmbedding, self).__init__(**kwargs)
        self.maxlen = maxlen
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim

    def build(self, input_shape):
        self.token_emb = layers.Embedding(input_dim=self.vocab_size, output_dim=self.embed_dim)
        self.pos_emb = layers.Embedding(input_dim=self.maxlen, output_dim=self.embed_dim)

    def get_config(self):
        config = super(concatenatedTokenAndPositionalEmbedding, self).get_config()
        config.update({"maxlen": self.maxlen, "vocab_size": self.vocab_size, "embed_dim": self.embed_dim})
        return config

    def call(self, x):
        # TODO: concat does not work somehow
        positions = tf.range(start=0, limit=self.maxlen, delta=1)
        positions = self.pos_emb(positions)
        x = self.token_emb(x)
        print("Pos.:", positions.shape)   # prints (15, 16)
        print("x shape:", x.shape)        # prints (None, 16)
        # What I actually want: output = tf.concat([x, positions], axis=...)
        # Also tried:
        # positions = tf.expand_dims(positions, 0)
        # positions.set_shape([None, 16])
        # output = layers.Concatenate(axis=1)([x, positions])
        return x + positions  # put output here instead
It gives me:
ValueError: Shape must be rank 3 but is rank 2 for '{{node concatenated_token_and_positional_embedding/concat}} = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32](concatenated_token_and_positional_embedding/embedding/embedding_lookup/Identity_1, concatenated_token_and_positional_embedding/embedding_1/embedding_lookup/Identity_1, concatenated_token_and_positional_embedding/concat/axis)' with input shapes: [?,16], [15,16], [].
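For reference: unlike the + in x + positions, tf.concat does not broadcast, so both tensors must already have the same rank and matching sizes on every axis except the concat axis. Below is a minimal sketch of a call method that tiles the positional encoding across the dynamic batch dimension before concatenating along the feature axis; it assumes the layer is fed token ids of shape (batch, maxlen), so that x comes out as (batch, maxlen, embed_dim).

    def call(self, x):
        positions = tf.range(start=0, limit=self.maxlen, delta=1)
        positions = self.pos_emb(positions)                 # (maxlen, embed_dim)
        x = self.token_emb(x)                               # (batch, maxlen, embed_dim), assuming rank-2 input
        # tf.concat cannot broadcast, so replicate the positional
        # encoding once per batch element before concatenating:
        batch_size = tf.shape(x)[0]
        positions = tf.expand_dims(positions, 0)            # (1, maxlen, embed_dim)
        positions = tf.tile(positions, [batch_size, 1, 1])  # (batch, maxlen, embed_dim)
        return tf.concat([x, positions], axis=-1)           # (batch, maxlen, 2 * embed_dim)

Note that concatenating along the last axis doubles the feature width to 2 * embed_dim, so downstream layers must be sized accordingly. Also, the (None, 16) print above suggests the layer may be receiving rank-1 input rather than (batch, maxlen), which would make x rank 2 and trigger exactly this rank mismatch.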