Attention_old.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class Attention(nn.Module):
    """Scores a set of neighbor embeddings against a single node representation
    and returns softmax-normalized attention weights."""

    def __init__(self, embedding_dims):
        super(Attention, self).__init__()
        self.embed_dim = embedding_dims
        self.bilinear = nn.Bilinear(self.embed_dim, self.embed_dim, 1)  # defined but not used in forward
        self.att1 = nn.Linear(self.embed_dim * 2, self.embed_dim)
        self.att3 = nn.Linear(self.embed_dim, 1)
        self.softmax = nn.Softmax(dim=0)
        self.dropout = nn.Dropout(p=0.0)

    def forward(self, node1, u_rep, num_neighs):
        # Tile the single node representation so it pairs with each neighbor embedding.
        uv_reps = u_rep.repeat(num_neighs, 1)
        # Concatenate neighbor and node representations: (num_neighs, 2 * embed_dim).
        x = torch.cat((node1, uv_reps), 1)
        x = F.relu(self.att1(x))
        x = self.dropout(x)
        # Project each pair down to a single score: (num_neighs, 1).
        x = self.att3(x)
        # Normalize the scores across neighbors (dim 0) into attention weights.
        att = F.softmax(x, dim=0)
        return att
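
A minimal usage sketch (not part of the original file): the shapes, embedding size, and neighbor count below are illustrative assumptions, chosen to match what `forward` expects.

# Hypothetical example: score 5 neighbor embeddings against one target node,
# assuming embedding_dims = 64.
embed_dim = 64
num_neighs = 5

att_layer = Attention(embed_dim)

neigh_reps = torch.randn(num_neighs, embed_dim)  # one row per neighbor
u_rep = torch.randn(1, embed_dim)                # representation of the target node

weights = att_layer(neigh_reps, u_rep, num_neighs)  # shape: (num_neighs, 1)
print(weights.sum())  # softmax over dim 0, so the weights sum to 1

# A typical next step is to aggregate the neighbors with these weights:
aggregated = torch.mm(weights.t(), neigh_reps)   # shape: (1, embed_dim)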