Update hypergraph models
gbg141 committed Apr 12, 2024
1 parent f3fd0f6 commit 74b77e4
Showing 11 changed files with 69 additions and 26 deletions.
18 changes: 11 additions & 7 deletions topomodelx/nn/hypergraph/allset.py
@@ -16,18 +16,20 @@ class AllSet(torch.nn.Module):
         Dimension of the input features.
     hidden_channels : int
         Dimension of the hidden features.
-    n_layers : int, default: 2
+    n_layers : int, default = 2
         Number of AllSet layers in the network.
-    layer_dropout: float, default: 0.2
+    layer_dropout : float, default = 0.2
         Dropout probability for the AllSet layer.
-    mlp_num_layers : int, default: 2
+    mlp_num_layers : int, default = 2
         Number of layers in the MLP.
-    mlp_dropout : float, default: 0.0
-        Dropout probability for the MLP.
-    mlp_activation : torch.nn.Module, default: None
+    mlp_activation : torch.nn.Module, default = None
         Activation function in the MLP.
-    mlp_norm : bool, default: False
+    mlp_dropout : float, default = 0.0
+        Dropout probability for the MLP.
+    mlp_norm : bool, default = False
         Whether to apply input normalization in the MLP.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -47,6 +49,7 @@ def __init__(
         mlp_activation=None,
         mlp_dropout=0.0,
         mlp_norm=None,
+        **kwargs,
     ):
         super().__init__()

@@ -59,6 +62,7 @@ def __init__(
                 mlp_activation=mlp_activation,
                 mlp_dropout=mlp_dropout,
                 mlp_norm=mlp_norm,
+                **kwargs,
             )
             for i in range(n_layers)
         )
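As a usage sketch of the updated signature (sizes are hypothetical; keyword names are taken from the docstring above):

```python
import torch
from topomodelx.nn.hypergraph.allset import AllSet

# Hypothetical feature sizes; keyword names follow the docstring above.
model = AllSet(in_channels=8, hidden_channels=16, n_layers=2, mlp_num_layers=2)

# After this commit, any extra keyword passed to AllSet is forwarded to every
# AllSetLayer via **kwargs (illustrative only; no such extra option is shown
# in this diff).
```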
4 changes: 4 additions & 0 deletions topomodelx/nn/hypergraph/allset_transformer.py
@@ -26,6 +26,8 @@ class AllSetTransformer(torch.nn.Module):
         Number of layers in the MLP.
     mlp_dropout : float, default: 0.2
         Dropout probability in the MLP.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -44,6 +46,7 @@ def __init__(
         dropout=0.2,
         mlp_num_layers=2,
         mlp_dropout=0.2,
+        **kwargs,
     ):
         super().__init__()

@@ -55,6 +58,7 @@ def __init__(
                 heads=heads,
                 mlp_num_layers=mlp_num_layers,
                 mlp_dropout=mlp_dropout,
+                **kwargs,
             )
             for i in range(n_layers)
         )
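A matching sketch for the transformer variant; only `dropout`, `mlp_num_layers`, `mlp_dropout`, `heads` and `**kwargs` are confirmed by the hunks above, so the remaining keyword names are assumptions:

```python
from topomodelx.nn.hypergraph.allset_transformer import AllSetTransformer

# in_channels/hidden_channels are assumed names; heads appears in the hunk above.
model = AllSetTransformer(
    in_channels=8,
    hidden_channels=16,
    heads=4,
    mlp_num_layers=2,
    mlp_dropout=0.2,
)
```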
6 changes: 5 additions & 1 deletion topomodelx/nn/hypergraph/dhgcn.py
@@ -16,8 +16,10 @@ class DHGCN(torch.nn.Module):
         Dimension of the input features.
     hidden_channels : int
         Dimension of the hidden features.
-    n_layer : int, default = 2
+    n_layers : int, default = 2
         Amount of message passing layers.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -31,6 +33,7 @@ def __init__(
         in_channels,
         hidden_channels,
         n_layers=1,
+        **kwargs,
     ):
         super().__init__()

@@ -39,6 +42,7 @@ def __init__(
                 in_channels=in_channels if i == 0 else hidden_channels,
                 intermediate_channels=hidden_channels,
                 out_channels=hidden_channels,
+                **kwargs,
             )
             for i in range(n_layers)
         )
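A construction sketch; every keyword below is visible in this diff. Note that the signature still defaults to `n_layers=1` while the fixed docstring says `default = 2`, so it is safest to pass the value explicitly:

```python
from topomodelx.nn.hypergraph.dhgcn import DHGCN

# n_layers is passed explicitly because the docstring default (2) and the
# signature default (1) disagree in this revision.
model = DHGCN(in_channels=8, hidden_channels=16, n_layers=2)
```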
12 changes: 7 additions & 5 deletions topomodelx/nn/hypergraph/hmpnn.py
@@ -12,18 +12,18 @@ class HMPNN(torch.nn.Module):
     Parameters
     ----------
     in_channels : int
-        Dimension of input features
+        Dimension of input features.
     hidden_channels : Tuple[int]
         A tuple of hidden feature dimensions to gradually reduce node/hyperedge representations feature
         dimension from in_features to the last item in the tuple.
     num_classes: int
         Number of classes
     n_layers : int, default = 2
         Number of HMPNNLayer layers.
-    adjacency_dropout_rate: int, default = 0.7
+    adjacency_dropout_rate : int, default = 0.7
         Adjacency dropout rate.
     regular_dropout_rate : int, default = 0.5
         Regular dropout rate applied on features.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -40,6 +40,7 @@ def __init__(
         n_layers=2,
         adjacency_dropout_rate=0.7,
         regular_dropout_rate=0.5,
+        **kwargs,
     ):
         super().__init__()

@@ -52,6 +53,7 @@ def __init__(
                 hidden_channels,
                 adjacency_dropout=adjacency_dropout_rate,
                 updating_dropout=regular_dropout_rate,
+                **kwargs,
             )
             for _ in range(n_layers)
         ]
@@ -66,7 +68,7 @@ def forward(self, x_0, x_1, incidence_1):
             Node features.
         x_1 : torch.Tensor, shape = (n_hyperedges, in_features)
             Hyperedge features.
-        incidence_1: torch.sparse.Tensor, shape = (n_nodes, n_hyperedges)
+        incidence_1 : torch.sparse.Tensor, shape = (n_nodes, n_hyperedges)
             Incidence matrix (B1).

         Returns
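Because this file's diff also shows the `forward` signature, a fuller sketch is possible. All sizes are hypothetical, `hidden_channels` is passed as a plain int even though the docstring above describes a tuple, and the constructor keyword names are read off the docstring rather than the full source:

```python
import torch
from topomodelx.nn.hypergraph.hmpnn import HMPNN

n_nodes, n_hyperedges, in_features = 10, 4, 8
x_0 = torch.randn(n_nodes, in_features)       # node features
x_1 = torch.randn(n_hyperedges, in_features)  # hyperedge features
incidence_1 = (torch.rand(n_nodes, n_hyperedges) > 0.5).float().to_sparse()

# Keyword names per the docstring above; hidden_channels as int is an assumption.
model = HMPNN(in_channels=in_features, hidden_channels=32, num_classes=3)
out = model(x_0, x_1, incidence_1)  # signature per the forward() hunk above
```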
11 changes: 10 additions & 1 deletion topomodelx/nn/hypergraph/hnhn.py
@@ -20,6 +20,8 @@ class HNHN(torch.nn.Module):
         Number of HNHN message passing layers.
     layer_drop : float, default = 0.2
         Dropout rate for the hidden features.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -30,7 +32,13 @@ class HNHN(torch.nn.Module):
     """

     def __init__(
-        self, in_channels, hidden_channels, incidence_1, n_layers=2, layer_drop=0.2
+        self,
+        in_channels,
+        hidden_channels,
+        incidence_1,
+        n_layers=2,
+        layer_drop=0.2,
+        **kwargs,
     ):
         super().__init__()

@@ -39,6 +47,7 @@ def __init__(
                 in_channels=in_channels if i == 0 else hidden_channels,
                 hidden_channels=hidden_channels,
                 incidence_1=incidence_1,
+                **kwargs,
             )
             for i in range(n_layers)
         )
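Unlike most models in this commit, HNHN binds the incidence matrix at construction time (see the expanded signature above), so one model instance is tied to one hypergraph. A sketch with a random incidence matrix for illustration:

```python
import torch
from topomodelx.nn.hypergraph.hnhn import HNHN

# Random 10-node, 4-hyperedge incidence matrix, for illustration only.
incidence_1 = (torch.rand(10, 4) > 0.5).float().to_sparse()

model = HNHN(in_channels=8, hidden_channels=16, incidence_1=incidence_1, n_layers=2)
```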
10 changes: 7 additions & 3 deletions topomodelx/nn/hypergraph/hypergat.py
@@ -16,8 +16,10 @@ class HyperGAT(torch.nn.Module):
         Dimension of the hidden features.
     n_layers : int, default = 2
         Amount of message passing layers.
-    layer_drop: float, default = 0.2
+    layer_drop : float, default = 0.2
         Dropout rate for the hidden features.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -32,13 +34,15 @@ def __init__(
         hidden_channels,
         n_layers=2,
         layer_drop=0.2,
+        **kwargs,
     ):
         super().__init__()

         self.layers = torch.nn.ModuleList(
             HyperGATLayer(
                 in_channels=in_channels if i == 0 else hidden_channels,
                 hidden_channels=hidden_channels,
+                **kwargs,
             )
             for i in range(n_layers)
         )
@@ -49,8 +53,8 @@ def forward(self, x_0, incidence_1):
         Parameters
         ----------
-        x_1 : torch.Tensor, shape = (n_edges, channels_edge)
-            Edge features.
+        x_0 : torch.Tensor, shape = (n_nodes, channels_nodes)
+            Node features.
         incidence_1 : torch.Tensor, shape = (n_nodes, n_edges)
             Boundary matrix of rank 1.
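With the corrected `forward(x_0, incidence_1)` docstring, a sketch is straightforward; a dense incidence tensor is an assumption, since the diff does not pin down the expected layout:

```python
import torch
from topomodelx.nn.hypergraph.hypergat import HyperGAT

x_0 = torch.randn(10, 8)                         # node features
incidence_1 = (torch.rand(10, 4) > 0.5).float()  # nodes x hyperedges

model = HyperGAT(in_channels=8, hidden_channels=16, n_layers=2, layer_drop=0.2)
out = model(x_0, incidence_1)  # argument order per the forward() hunk above
```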
14 changes: 8 additions & 6 deletions topomodelx/nn/hypergraph/hypersage.py
@@ -14,10 +14,12 @@ class HyperSAGE(torch.nn.Module):
         Dimension of the input features.
     hidden_channels : int
         Dimension of the hidden features.
-    n_layer : int, default = 2
+    n_layers : int, default = 2
         Amount of message passing layers.
     alpha : int, default = -1
-        Max number of nodes in a neighborhood to consider. If -1 it considers all the nodes.ù
+        Max number of nodes in a neighborhood to consider. If -1 it considers all the nodes.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -39,14 +41,14 @@ def __init__(self, in_channels, hidden_channels, n_layers=2, alpha=-1, **kwargs)
             for i in range(n_layers)
         )

-    def forward(self, x_0, incidence):
+    def forward(self, x_0, incidence_1):
         """Forward computation through layers, then linear layer, then global max pooling.

         Parameters
         ----------
-        x : torch.Tensor, shape = (n_nodes, features_nodes)
+        x_0 : torch.Tensor, shape = (n_nodes, features_nodes)
             Edge features.
-        incidence : torch.Tensor, shape = (n_nodes, n_edges)
+        incidence_1 : torch.Tensor, shape = (n_nodes, n_edges)
             Boundary matrix of rank 1.

         Returns
@@ -55,6 +57,6 @@ def forward(self, x_0, incidence):
             Label assigned to whole complex.
         """
         for layer in self.layers:
-            x_0 = layer.forward(x_0, incidence)
+            x_0 = layer.forward(x_0, incidence_1)

return x_0
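A sketch exercising the renamed `incidence_1` argument; all keywords below appear in the `__init__` shown above, and `alpha=-1` keeps every node of a neighborhood per the docstring:

```python
import torch
from topomodelx.nn.hypergraph.hypersage import HyperSAGE

x_0 = torch.randn(10, 8)
incidence_1 = (torch.rand(10, 4) > 0.5).float()

model = HyperSAGE(in_channels=8, hidden_channels=16, n_layers=2, alpha=-1)
x_out = model(x_0, incidence_1)  # returns the updated x_0, per the loop above
```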
4 changes: 4 additions & 0 deletions topomodelx/nn/hypergraph/unigcn.py
@@ -16,6 +16,8 @@ class UniGCN(torch.nn.Module):
         Dimension of the hidden features.
     n_layers : int, default = 2
         Amount of message passing layers.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -30,13 +32,15 @@ def __init__(
         in_channels,
         hidden_channels,
         n_layers=2,
+        **kwargs,
     ):
         super().__init__()

         self.layers = torch.nn.ModuleList(
             UniGCNLayer(
                 in_channels=in_channels if i == 0 else hidden_channels,
                 hidden_channels=hidden_channels,
+                **kwargs,
             )
             for i in range(n_layers)
         )
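Construction sketch; the three keywords are visible in the signature above, and the comment illustrates the new passthrough with a deliberately hypothetical option:

```python
from topomodelx.nn.hypergraph.unigcn import UniGCN

model = UniGCN(in_channels=8, hidden_channels=16, n_layers=2)

# After this commit, UniGCN(..., some_option=...) would forward some_option to
# every UniGCNLayer via **kwargs ("some_option" is hypothetical, not a real flag).
```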
4 changes: 4 additions & 0 deletions topomodelx/nn/hypergraph/unigcnii.py
@@ -28,6 +28,8 @@ class UniGCNII(torch.nn.Module):
         Dropout rate for the hidden features.
     use_norm : bool, default=False
         Whether to apply row normalization after every layer.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -47,6 +49,7 @@ def __init__(
         input_drop=0.2,
         layer_drop=0.2,
         use_norm=False,
+        **kwargs,
     ):
         super().__init__()
         layers = []
@@ -65,6 +68,7 @@ def __init__(
                     alpha=alpha,
                     beta=beta,
                     use_norm=use_norm,
+                    **kwargs,
                 )
             )

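A sketch; `in_channels`, `hidden_channels`, `n_layers`, `alpha` and `beta` are assumed constructor names (alpha/beta are forwarded to the layers in the hunk above), while the drop rates and `use_norm` come from the visible signature:

```python
from topomodelx.nn.hypergraph.unigcnii import UniGCNII

model = UniGCNII(
    in_channels=8,       # assumed name
    hidden_channels=16,  # assumed name
    n_layers=2,          # assumed name
    alpha=0.1,           # GCNII-style initial-residual weight, assumed name
    beta=0.5,            # GCNII-style identity-mapping weight, assumed name
    input_drop=0.2,
    layer_drop=0.2,
    use_norm=False,
)
```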
5 changes: 4 additions & 1 deletion topomodelx/nn/hypergraph/unigin.py
@@ -26,7 +26,8 @@ class UniGIN(torch.nn.Module):
         Whether to make eps a trainable parameter.
     use_norm : bool, default=False
         Whether to apply row normalization after every layer.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -46,6 +47,7 @@ def __init__(
         eps=0,
         train_eps=False,
         use_norm=False,
+        **kwargs,
     ):
         super().__init__()

@@ -60,6 +62,7 @@ def __init__(
                 eps=eps,
                 train_eps=train_eps,
                 use_norm=use_norm,
+                **kwargs,
             )
             for _ in range(n_layers)
         )
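A sketch; `eps`, `train_eps` and `use_norm` are confirmed by the hunks above (they mirror GIN's epsilon term), while `in_channels` and `hidden_channels` are assumed names:

```python
from topomodelx.nn.hypergraph.unigin import UniGIN

# eps is GIN's self-loop weight; train_eps=True makes it learnable.
model = UniGIN(in_channels=8, hidden_channels=16, eps=0, train_eps=True)
```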
7 changes: 5 additions & 2 deletions topomodelx/nn/hypergraph/unisage.py
@@ -26,9 +26,10 @@ class UniSAGE(torch.nn.Module):
         Aggregator function for hyperedges.
     v_aggr : Literal["sum", "mean",], default="mean"
         Aggregator function for nodes.
-    use_norm : boolean
+    use_norm : bool
         Whether to apply row normalization after every layer.
+    **kwargs : optional
+        Additional arguments for the inner layers.

     References
     ----------
@@ -54,6 +55,7 @@ def __init__(
             "mean",
         ] = "mean",
         use_norm: bool = False,
+        **kwargs,
     ):
         super().__init__()

@@ -67,6 +69,7 @@ def __init__(
                 e_aggr=e_aggr,
                 v_aggr=v_aggr,
                 use_norm=use_norm,
+                **kwargs,
             )
             for i in range(n_layers)
         )
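Finally, a sketch using the documented aggregator literals; `in_channels` and `hidden_channels` are assumed names, the rest are visible above:

```python
from topomodelx.nn.hypergraph.unisage import UniSAGE

# e_aggr/v_aggr accept the Literal values documented above ("sum", "mean").
model = UniSAGE(
    in_channels=8,
    hidden_channels=16,
    e_aggr="sum",
    v_aggr="mean",
    use_norm=False,
)
```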
