#coding:utf-8
import numpy as np
import tensorflow as tf
from Model import Model
def tf_resize(tensor, axis, size):
    '''Resize `tensor` along `axis` to length `size`: slice if it is too long, zero-pad if it is too short.'''
    shape = tensor.get_shape().as_list()
    osize = shape[axis]
    if osize == size:
        return tensor
    if osize > size:
        shape[axis] = size
        return tf.slice(tensor, begin = (0,) * len(shape), size = shape)
    paddings = [[0, 0] for i in range(len(shape))]
    paddings[axis][1] = size - osize
    return tf.pad(tensor, paddings = paddings)
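# A quick behavioral sketch of tf_resize (hypothetical shapes, illustration only):
# with x of static shape (2, 3),
#     tf_resize(x, -1, 5)  ->  shape (2, 5), new entries zero-padded on the right
#     tf_resize(x, -1, 2)  ->  shape (2, 2), the trailing column sliced off
#     tf_resize(x, -1, 3)  ->  x returned unchanged
# _transfer below uses it to match entity embeddings to the relation dimension.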
class TransD(Model):
    r'''
    TransD constructs a dynamic mapping matrix for each entity-relation pair by
    considering the diversity of entities and relations simultaneously. Compared
    with TransR/CTransR, TransD has fewer parameters and requires no
    matrix-vector multiplication.
    '''
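    # TransD (Ji et al., 2015) in brief: each entity/relation carries an embedding
    # (h, t, r) plus a projection vector (h_p, t_p, r_p). The pair-specific mapping
    # matrix is M_r = r_p h_p^T + I, so the projected head is
    #     h_perp = M_r h = (pad/slice of h) + (h_p . h) r_p,
    # which _transfer below computes directly, never materializing M_r.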
    def _transfer(self, e, t, r):
        # Project entity embedding e into the relation space: resize e to the
        # relation dimension, then add the correction term (e . t) * r, where
        # t and r are the entity and relation transfer vectors.
        return tf_resize(e, -1, r.get_shape()[-1]) + tf.reduce_sum(e * t, -1, keepdims = True) * r
    def _calc(self, h, t, r):
        h = tf.nn.l2_normalize(h, -1)
        t = tf.nn.l2_normalize(t, -1)
        r = tf.nn.l2_normalize(r, -1)
        return tf.abs(h + r - t)
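    # Summed over the last axis (see loss_def/predict_def), this yields the L1
    # score f(h, r, t) = ||h + r - t||_1 on normalized embeddings; lower scores
    # indicate more plausible triples.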
    def embedding_def(self):
        # Obtain the model configuration
        config = self.get_config()
        # Define the model parameters: embeddings of entities and relations,
        # plus entity and relation transfer (projection) vectors
        self.ent_embeddings = tf.get_variable(name = "ent_embeddings", shape = [config.entTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.rel_embeddings = tf.get_variable(name = "rel_embeddings", shape = [config.relTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.ent_transfer = tf.get_variable(name = "ent_transfer", shape = [config.entTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.rel_transfer = tf.get_variable(name = "rel_transfer", shape = [config.relTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.parameter_lists = {"ent_embeddings": self.ent_embeddings,
                                "rel_embeddings": self.rel_embeddings,
                                "ent_transfer": self.ent_transfer,
                                "rel_transfer": self.rel_transfer}
    def loss_def(self):
        # Obtain the model configuration
        config = self.get_config()
        # Get the positive and negative triples used for training.
        # pos_h, pos_t, pos_r have shape (batch_size, 1);
        # neg_h, neg_t, neg_r have shape (batch_size, negative_ent + negative_rel)
        pos_h, pos_t, pos_r = self.get_positive_instance(in_batch = True)
        neg_h, neg_t, neg_r = self.get_negative_instance(in_batch = True)
        # Look up the embeddings of the triples' entities and relations,
        # e.g. pos_h_e, pos_t_e and pos_r_e are the embeddings of positive triples
        pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, pos_h)
        pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, pos_t)
        pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, pos_r)
        neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, neg_h)
        neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, neg_t)
        neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, neg_r)
        # Look up the transfer vectors used to project entity embeddings,
        # e.g. pos_h_t, pos_t_t and pos_r_t are the transfer vectors of positive triples
        pos_h_t = tf.nn.embedding_lookup(self.ent_transfer, pos_h)
        pos_t_t = tf.nn.embedding_lookup(self.ent_transfer, pos_t)
        pos_r_t = tf.nn.embedding_lookup(self.rel_transfer, pos_r)
        neg_h_t = tf.nn.embedding_lookup(self.ent_transfer, neg_h)
        neg_t_t = tf.nn.embedding_lookup(self.ent_transfer, neg_t)
        neg_r_t = tf.nn.embedding_lookup(self.rel_transfer, neg_r)
        # Project the entities and compute scores for all positive and negative triples
        p_h = self._transfer(pos_h_e, pos_h_t, pos_r_t)
        p_t = self._transfer(pos_t_e, pos_t_t, pos_r_t)
        p_r = pos_r_e
        n_h = self._transfer(neg_h_e, neg_h_t, neg_r_t)
        n_t = self._transfer(neg_t_e, neg_t_t, neg_r_t)
        n_r = neg_r_e
        # _p_score has shape (batch_size, 1, hidden_size);
        # _n_score has shape (batch_size, negative_ent + negative_rel, hidden_size)
        _p_score = self._calc(p_h, p_t, p_r)
        _n_score = self._calc(n_h, n_t, n_r)
        # p_score has shape (batch_size, 1, 1);
        # n_score has shape (batch_size, negative_ent + negative_rel, 1)
        p_score = tf.reduce_sum(_p_score, -1, keepdims = True)
        n_score = tf.reduce_sum(_n_score, -1, keepdims = True)
        # The loss the framework will minimize
        self.loss = tf.reduce_mean(tf.maximum(p_score - n_score + config.margin, 0))
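    # The objective above is the standard margin-based ranking loss
    #     L = mean( max(0, f(pos) - f(neg) + margin) ),
    # which drives positive triples to score at least `margin` lower than their
    # negative counterparts; p_score and n_score broadcast across the negatives.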
    def predict_def(self):
        config = self.get_config()
        predict_h, predict_t, predict_r = self.get_predict_instance()
        predict_h_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_h)
        predict_t_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_t)
        predict_r_e = tf.nn.embedding_lookup(self.rel_embeddings, predict_r)
        predict_h_t = tf.nn.embedding_lookup(self.ent_transfer, predict_h)
        predict_t_t = tf.nn.embedding_lookup(self.ent_transfer, predict_t)
        predict_r_t = tf.nn.embedding_lookup(self.rel_transfer, predict_r)
        h_e = self._transfer(predict_h_e, predict_h_t, predict_r_t)
        t_e = self._transfer(predict_t_e, predict_t_t, predict_r_t)
        r_e = predict_r_e
        self.predict = tf.reduce_sum(self._calc(h_e, t_e, r_e), -1, keepdims = True)
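# A minimal training sketch, assuming this file lives inside an OpenKE-style
# framework whose Config object handles data loading, negative sampling, and
# optimization (the setter names below follow OpenKE's TensorFlow API and are
# not defined in this file):
#
#     import config
#     con = config.Config()
#     con.set_in_path("./benchmarks/FB15K/")
#     con.set_train_times(1000)
#     con.set_nbatches(100)
#     con.set_alpha(0.001)       # learning rate
#     con.set_margin(1.0)        # config.margin in loss_def
#     con.set_dimension(100)     # config.hidden_size in embedding_def
#     con.set_ent_neg_rate(1)    # negative_ent in the shape comments above
#     con.set_rel_neg_rate(0)    # negative_rel in the shape comments above
#     con.set_opt_method("SGD")
#     con.init()
#     con.set_model(TransD)
#     con.run()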