```python
for k in range(0, self.n_layers):
    if self.layer_fun == 'gcf':
        ego_embeddings_s = one_graph_layer_gcf(A_fold_hat_s, ego_embeddings_s, weights_s, k)
        ego_embeddings_t = one_graph_layer_gcf(A_fold_hat_t, ego_embeddings_t, weights_t, k)
    if k >= self.n_layers - self.n_interaction and self.n_interaction > 0:
        if self.fuse_type_in == 'la2add':
            ego_embeddings_s, ego_embeddings_t = self.s_t_la2add_layer(ego_embeddings_s, ego_embeddings_t,
                                                                       self.lambda_s, self.lambda_t,
                                                                       self.domain_laplace)
    norm_embeddings_s = tf.math.l2_normalize(ego_embeddings_s, axis=1)
    norm_embeddings_t = tf.math.l2_normalize(ego_embeddings_t, axis=1)
    if self.connect_way == 'concat':
        all_embeddings_s += [norm_embeddings_s]
        all_embeddings_t += [norm_embeddings_t]
    elif self.connect_way == 'mean':
        all_embeddings_s += norm_embeddings_s
        all_embeddings_t += norm_embeddings_t
if self.connect_way == 'concat':
    all_embeddings_s = tf.concat(all_embeddings_s, 1)
    all_embeddings_t = tf.concat(all_embeddings_t, 1)
elif self.connect_way == 'mean':
    all_embeddings_s = all_embeddings_s / (self.n_layers + 1)
    all_embeddings_t = all_embeddings_t / (self.n_layers + 1)
```
where the specific lines I am asking about are:
```python
norm_embeddings_s = tf.math.l2_normalize(ego_embeddings_s, axis=1)
norm_embeddings_t = tf.math.l2_normalize(ego_embeddings_t, axis=1)
if self.connect_way == 'concat':
    all_embeddings_s += [norm_embeddings_s]
    all_embeddings_t += [norm_embeddings_t]
elif self.connect_way == 'mean':
    all_embeddings_s += norm_embeddings_s
    all_embeddings_t += norm_embeddings_t
```
Why do you accumulate the L2-normalized embeddings (`norm_embeddings_s`) into `all_embeddings_s`, rather than the raw per-layer embeddings (`ego_embeddings_s`)? Note also that in the `'mean'` branch, `all_embeddings_s += norm_embeddings_s` adds a tensor element-wise instead of appending it to the list, which is inconsistent with the `'concat'` branch.
This code does not appear to be consistent with the equations in the paper — could you clarify which behavior is intended?