#coding:utf-8
import numpy as np
import tensorflow as tf
from Model import Model
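#NOTE: this implementation targets TensorFlow 1.x; tf.get_variable and
#tf.contrib.layers.xavier_initializer are not available in TensorFlow 2.x.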
class TransH(Model):
    r'''
    To preserve the mapping properties of 1-N/N-1/N-N relations,
    TransH interprets a relation as a translation operation on a relation-specific hyperplane.
    '''
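    # Score function (Wang et al., 2014): each relation r has a hyperplane with
    # unit normal vector w_r ("normal_vectors") and a translation vector d_r
    # ("rel_embeddings") lying on that hyperplane. Entity embeddings are first
    # projected onto the hyperplane, e_perp = e - (w_r^T e) * w_r, and the score
    # of a triple (h, r, t) is the L1 distance || h_perp + d_r - t_perp ||_1;
    # lower scores indicate more plausible triples.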
    def _transfer(self, e, n):
        n = tf.nn.l2_normalize(n, -1)
        return e - tf.reduce_sum(e * n, -1, keepdims = True) * n
    def _calc(self, h, t, r, flag = True):
        #Per-dimension score |h + r - t| on normalized embeddings; the caller sums over the last axis to obtain the L1 distance
        h = tf.nn.l2_normalize(h, -1)
        t = tf.nn.l2_normalize(t, -1)
        r = tf.nn.l2_normalize(r, -1)
        return tf.abs(h + r - t)
    def embedding_def(self):
        #Obtaining the initial configuration of the model
        config = self.get_config()
        #Defining the required parameters of the model: embeddings of entities and relations, and normal vectors of the relation-specific hyperplanes
        self.ent_embeddings = tf.get_variable(name = "ent_embeddings", shape = [config.entTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.rel_embeddings = tf.get_variable(name = "rel_embeddings", shape = [config.relTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.normal_vectors = tf.get_variable(name = "normal_vectors", shape = [config.relTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.parameter_lists = {"ent_embeddings": self.ent_embeddings,
                                "rel_embeddings": self.rel_embeddings,
                                "normal_vectors": self.normal_vectors}
    def loss_def(self):
        #Obtaining the initial configuration of the model
        config = self.get_config()
        #Getting positive triples and negative triples for training
        #The shapes of pos_h, pos_t, pos_r are (batch_size, 1)
        #The shapes of neg_h, neg_t, neg_r are (batch_size, negative_ent + negative_rel)
        pos_h, pos_t, pos_r = self.get_positive_instance(in_batch = True)
        neg_h, neg_t, neg_r = self.get_negative_instance(in_batch = True)
        #Looking up the embeddings of the entities and relations in the triples, e.g. pos_h_e, pos_t_e and pos_r_e are the embeddings of the positive triples
        pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, pos_h)
        pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, pos_t)
        pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, pos_r)
        neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, neg_h)
        neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, neg_t)
        neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, neg_r)
        #Getting the normal vectors of the hyperplanes used to project the entity embeddings
        pos_norm = tf.nn.embedding_lookup(self.normal_vectors, pos_r)
        neg_norm = tf.nn.embedding_lookup(self.normal_vectors, neg_r)
        #Projecting the entity embeddings onto the relation-specific hyperplanes
        p_h = self._transfer(pos_h_e, pos_norm)
        p_t = self._transfer(pos_t_e, pos_norm)
        p_r = pos_r_e
        n_h = self._transfer(neg_h_e, neg_norm)
        n_t = self._transfer(neg_t_e, neg_norm)
        n_r = neg_r_e
        #Calculating score functions for all positive triples and negative triples
        #The shape of _p_score is (batch_size, 1, hidden_size)
        #The shape of _n_score is (batch_size, negative_ent + negative_rel, hidden_size)
        _p_score = self._calc(p_h, p_t, p_r)
        _n_score = self._calc(n_h, n_t, n_r)
        #The shape of p_score is (batch_size, 1, 1)
        #The shape of n_score is (batch_size, negative_ent + negative_rel, 1)
        p_score = tf.reduce_sum(_p_score, -1, keepdims = True)
        n_score = tf.reduce_sum(_n_score, -1, keepdims = True)
        #Margin-based ranking loss optimized by the framework: mean over the batch of max(0, p_score - n_score + margin)
        self.loss = tf.reduce_mean(tf.maximum(p_score - n_score + config.margin, 0))
    def predict_def(self):
        config = self.get_config()
        #Getting the triples to be scored, then looking up their embeddings and the normal vectors of their relations
        predict_h, predict_t, predict_r = self.get_predict_instance()
        predict_h_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_h)
        predict_t_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_t)
        predict_r_e = tf.nn.embedding_lookup(self.rel_embeddings, predict_r)
        predict_norm = tf.nn.embedding_lookup(self.normal_vectors, predict_r)
        #Projecting the entity embeddings onto the hyperplane of the queried relation
        h_e = self._transfer(predict_h_e, predict_norm)
        t_e = self._transfer(predict_t_e, predict_norm)
        r_e = predict_r_e
        #Lower predicted scores indicate more plausible triples
        self.predict = tf.reduce_sum(self._calc(h_e, t_e, r_e), -1, keepdims = True)
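#A minimal training sketch, assuming this model is used inside an OpenKE-style
#framework whose Config object provides the setters below; the dataset path and
#hyperparameter values are placeholders, not values from this repository.
#
#   import config
#   con = config.Config()
#   con.set_in_path("./benchmarks/FB15K/")
#   con.set_train_times(500)
#   con.set_nbatches(100)
#   con.set_alpha(0.001)      #learning rate
#   con.set_margin(1.0)       #margin used in loss_def
#   con.set_dimension(100)    #hidden_size
#   con.set_ent_neg_rate(1)   #negative_ent
#   con.set_rel_neg_rate(0)   #negative_rel
#   con.set_opt_method("SGD")
#   con.init()
#   con.set_model(TransH)
#   con.run()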