diff --git a/topomodelx/nn/hypergraph/allset.py b/topomodelx/nn/hypergraph/allset.py
index 219f9049..0219747f 100644
--- a/topomodelx/nn/hypergraph/allset.py
+++ b/topomodelx/nn/hypergraph/allset.py
@@ -16,18 +16,20 @@ class AllSet(torch.nn.Module):
         Dimension of the input features.
     hidden_channels : int
         Dimension of the hidden features.
-    n_layers : int, default: 2
+    n_layers : int, default = 2
         Number of AllSet layers in the network.
-    layer_dropout: float, default: 0.2
+    layer_dropout : float, default = 0.2
         Dropout probability for the AllSet layer.
-    mlp_num_layers : int, default: 2
+    mlp_num_layers : int, default = 2
         Number of layers in the MLP.
-    mlp_dropout : float, default: 0.0
-        Dropout probability for the MLP.
-    mlp_activation : torch.nn.Module, default: None
+    mlp_activation : torch.nn.Module, default = None
         Activation function in the MLP.
-    mlp_norm : bool, default: False
+    mlp_dropout : float, default = 0.0
+        Dropout probability for the MLP.
+    mlp_norm : bool, default = False
         Whether to apply input normalization in the MLP.
+    **kwargs : optional
+        Additional arguments for the inner layers.
 
     References
     ----------
@@ -47,6 +49,7 @@ def __init__(
         mlp_activation=None,
         mlp_dropout=0.0,
         mlp_norm=None,
+        **kwargs,
     ):
         super().__init__()
 
@@ -59,6 +62,7 @@ def __init__(
                 mlp_activation=mlp_activation,
                 mlp_dropout=mlp_dropout,
                 mlp_norm=mlp_norm,
+                **kwargs,
             )
             for i in range(n_layers)
         )
diff --git a/topomodelx/nn/hypergraph/allset_layer.py b/topomodelx/nn/hypergraph/allset_layer.py
index e89b6d39..5d408506 100644
--- a/topomodelx/nn/hypergraph/allset_layer.py
+++ b/topomodelx/nn/hypergraph/allset_layer.py
@@ -27,6 +27,8 @@ class AllSetLayer(nn.Module):
         Dropout probability in the MLP.
     mlp_norm : str or None, optional
         Type of layer normalization in the MLP.
+    **kwargs : optional
+        Additional arguments for the layer modules.
 
     References
     ----------
@@ -45,6 +47,7 @@ def __init__(
         mlp_activation=nn.ReLU,
         mlp_dropout: float = 0.0,
         mlp_norm=None,
+        **kwargs,
     ) -> None:
         super().__init__()
 
@@ -60,6 +63,7 @@ def __init__(
             mlp_activation=mlp_activation,
             mlp_dropout=mlp_dropout,
             mlp_norm=mlp_norm,
+            **kwargs,
         )
 
         self.edge2vertex = AllSetBlock(
@@ -70,6 +74,7 @@ def __init__(
             mlp_activation=mlp_activation,
             mlp_dropout=mlp_dropout,
             mlp_norm=mlp_norm,
+            **kwargs,
         )
 
     def reset_parameters(self) -> None:
@@ -103,7 +108,7 @@ def forward(self, x_0, incidence_1):
 
         Parameters
         ----------
-        x : torch.Tensor, shape = (n_nodes, channels)
+        x_0 : torch.Tensor, shape = (n_nodes, channels)
             Node input features.
         incidence_1 : torch.sparse, shape = (n_nodes, n_hyperedges)
             Incidence matrix :math:`B_1` mapping hyperedges to nodes.
@@ -200,6 +205,8 @@ class AllSetBlock(nn.Module):
         Dropout probability in the MLP.
     mlp_norm : callable or None, optional
         Type of layer normalization in the MLP.
+    **kwargs : optional
+        Additional arguments for the block modules.
     """
 
     encoder: MLP | nn.Identity
@@ -214,6 +221,7 @@ def __init__(
         mlp_activation=nn.ReLU,
         mlp_dropout: float = 0.0,
         mlp_norm=None,
+        **kwargs,
     ) -> None:
         super().__init__()
 
@@ -255,7 +263,7 @@ def reset_parameters(self) -> None:
             self.decoder.reset_parameters()
         self.conv.reset_parameters()
 
-    def forward(self, x, incidence):
+    def forward(self, x_0, incidence_1):
         """
         Forward computation.
 
@@ -263,7 +271,7 @@ def forward(self, x, incidence):
         ----------
         x_0 : torch.Tensor
             Input node features.
-        incidence : torch.sparse
+        incidence_1 : torch.sparse
             Incidence matrix between node/hyperedges.
 
         Returns
@@ -271,7 +279,7 @@ def forward(self, x, incidence):
         torch.Tensor
             Output features.
""" - x = F.relu(self.encoder(x)) + x = F.relu(self.encoder(x_0)) x = F.dropout(x, p=self.dropout, training=self.training) - x = self.conv(x, incidence) + x = self.conv(x, incidence_1) return F.relu(self.decoder(x)) diff --git a/topomodelx/nn/hypergraph/allset_transformer.py b/topomodelx/nn/hypergraph/allset_transformer.py index ffaa4279..67bfd882 100644 --- a/topomodelx/nn/hypergraph/allset_transformer.py +++ b/topomodelx/nn/hypergraph/allset_transformer.py @@ -26,6 +26,8 @@ class AllSetTransformer(torch.nn.Module): Number of layers in the MLP. mlp_dropout : float, default: 0.2 Dropout probability in the MLP. + **kwargs : optional + Additional arguments for the inner layers. References ---------- @@ -44,6 +46,7 @@ def __init__( dropout=0.2, mlp_num_layers=2, mlp_dropout=0.2, + **kwargs, ): super().__init__() @@ -55,6 +58,7 @@ def __init__( heads=heads, mlp_num_layers=mlp_num_layers, mlp_dropout=mlp_dropout, + **kwargs, ) for i in range(n_layers) ) diff --git a/topomodelx/nn/hypergraph/allset_transformer_layer.py b/topomodelx/nn/hypergraph/allset_transformer_layer.py index 3dc33569..1e805e4c 100644 --- a/topomodelx/nn/hypergraph/allset_transformer_layer.py +++ b/topomodelx/nn/hypergraph/allset_transformer_layer.py @@ -32,6 +32,8 @@ class AllSetTransformerLayer(nn.Module): Dropout probability in the MLP. mlp_norm : str or None, optional Type of layer normalization in the MLP. + **kwargs : optional + Additional arguments for the layer modules. References ---------- @@ -52,6 +54,7 @@ def __init__( mlp_activation=nn.ReLU, mlp_dropout: float = 0.0, mlp_norm=None, + **kwargs, ) -> None: super().__init__() diff --git a/topomodelx/nn/hypergraph/dhgcn.py b/topomodelx/nn/hypergraph/dhgcn.py index 846e78d9..e6f02e6c 100644 --- a/topomodelx/nn/hypergraph/dhgcn.py +++ b/topomodelx/nn/hypergraph/dhgcn.py @@ -16,8 +16,10 @@ class DHGCN(torch.nn.Module): Dimension of the input features. hidden_channels : int Dimension of the hidden features. - n_layer : int, default = 2 + n_layers : int, default = 2 Amount of message passing layers. + **kwargs : optional + Additional arguments for the inner layers. References ---------- @@ -31,6 +33,7 @@ def __init__( in_channels, hidden_channels, n_layers=1, + **kwargs, ): super().__init__() @@ -39,6 +42,7 @@ def __init__( in_channels=in_channels if i == 0 else hidden_channels, intermediate_channels=hidden_channels, out_channels=hidden_channels, + **kwargs, ) for i in range(n_layers) ) diff --git a/topomodelx/nn/hypergraph/dhgcn_layer.py b/topomodelx/nn/hypergraph/dhgcn_layer.py index c0502a95..37c3446a 100644 --- a/topomodelx/nn/hypergraph/dhgcn_layer.py +++ b/topomodelx/nn/hypergraph/dhgcn_layer.py @@ -18,6 +18,14 @@ class DHGCNLayer(torch.nn.Module): Dimension of intermediate features. out_channels : int Dimension of output features. + k_neighbours : int, default=3 + Number of neighbours to consider in the local topology. + k_centroids : int, default=4 + Number of centroids to consider in the global topology. + device : str, default="cpu" + Device to store the tensors. + **kwargs : optional + Additional arguments for the layer modules. References ---------- @@ -40,6 +48,7 @@ def __init__( k_neighbours: int = 3, k_centroids: int = 4, device: str = "cpu", + **kwargs, ) -> None: super().__init__() @@ -69,7 +78,7 @@ def kmeans_graph(x, k, flow: str = "source_to_target"): x : torch.Tensor, shape = (n_nodes, node_features) Input features on the nodes of the simplicial complex. k : int - Number of clusters/centroids + Number of clusters/centroids. 
         flow : str
             If this parameter has value "source_to_target", the output will have
             the shape [n_nodes, n_hyperedges = k_centroids].
@@ -80,11 +89,9 @@ def kmeans_graph(x, k, flow: str = "source_to_target"):
 
         Returns
         -------
-        hyperedge_index : torch.Tensor, shape = (n_nodes, 2)
-            Indices of the on-zero values in the feature matrix of hypergraph
-            convolutional network.
-            The order of dimensions of the matrix is defined by the value of the flow
-            parameter.
+        torch.Tensor
+            Indices of the non-zero values in the feature matrix of the hypergraph convolutional network.
+            The order of dimensions of the matrix is defined by the value of the flow parameter.
         """
         assert flow in ["source_to_target", "target_to_source"]
         device = x.device
@@ -142,10 +149,12 @@ def kmeans(self, x_0, k=None):
         ----------
         x_0 : torch.Tensor, shape = (n_nodes, node_features)
             Input features on the nodes of the simplicial complex.
+        k : int
+            Number of clusters/centroids.
 
         Returns
         -------
-        hyperedge_index : torch.Tensor, shape = (n_nodes, 2)
+        torch.Tensor
             Indices of the non-zero values in the feature matrix of the hypergraph convolutional network.
         """
         if k is None:
@@ -166,8 +175,8 @@ def get_dynamic_topology(self, x_0_features):
 
         Returns
         -------
-        hyperedge_incidence_matrix : torch.Tensor, shape = (n_nodes, n_nodes + k_centroids)
-            Incidence matrix mapping edges to nodes.
+        torch.Tensor
+            Incidence matrix mapping edges to nodes, shape = (n_nodes, n_nodes + k_centroids).
         """
         device = x_0_features.device
         n_nodes = x_0_features.size(0)
diff --git a/topomodelx/nn/hypergraph/hmpnn.py b/topomodelx/nn/hypergraph/hmpnn.py
index d2ef5a6e..6dce8406 100644
--- a/topomodelx/nn/hypergraph/hmpnn.py
+++ b/topomodelx/nn/hypergraph/hmpnn.py
@@ -12,18 +12,18 @@ class HMPNN(torch.nn.Module):
     Parameters
     ----------
     in_channels : int
-        Dimension of input features
+        Dimension of input features.
     hidden_channels : Tuple[int]
         A tuple of hidden feature dimensions to gradually reduce node/hyperedge
         representations feature dimension from in_features to the last item in the tuple.
-    num_classes: int
-        Number of classes
     n_layers : int, default = 2
         Number of HMPNNLayer layers.
-    adjacency_dropout_rate: int, default = 0.7
+    adjacency_dropout_rate : float, default = 0.7
         Adjacency dropout rate.
     regular_dropout_rate : float, default = 0.5
         Regular dropout rate applied on features.
+    **kwargs : optional
+        Additional arguments for the inner layers.
 
     References
     ----------
@@ -40,6 +40,7 @@ def __init__(
         n_layers=2,
         adjacency_dropout_rate=0.7,
         regular_dropout_rate=0.5,
+        **kwargs,
     ):
         super().__init__()
 
@@ -52,6 +53,7 @@ def __init__(
                 hidden_channels,
                 adjacency_dropout=adjacency_dropout_rate,
                 updating_dropout=regular_dropout_rate,
+                **kwargs,
             )
             for _ in range(n_layers)
         ]
@@ -66,7 +68,7 @@ def forward(self, x_0, x_1, incidence_1):
             Node features.
         x_1 : torch.Tensor, shape = (n_hyperedges, in_features)
             Hyperedge features.
-        incidence_1: torch.sparse.Tensor, shape = (n_nodes, n_hyperedges)
+        incidence_1 : torch.sparse.Tensor, shape = (n_nodes, n_hyperedges)
             Incidence matrix (B1).
 
         Returns
diff --git a/topomodelx/nn/hypergraph/hmpnn_layer.py b/topomodelx/nn/hypergraph/hmpnn_layer.py
index c144acc9..7e1e5d2d 100644
--- a/topomodelx/nn/hypergraph/hmpnn_layer.py
+++ b/topomodelx/nn/hypergraph/hmpnn_layer.py
@@ -10,9 +10,25 @@
 
 
 class _AdjacencyDropoutMixin:
+    r"""Mixin class for applying dropout to adjacency matrices."""
+
     training: bool
 
     def apply_dropout(self, neighborhood, dropout_rate: float):
+        r"""Apply dropout to the adjacency matrix.
+
+        Parameters
+        ----------
+        neighborhood : torch.sparse.Tensor
+            Sparse tensor representing the adjacency matrix.
+        dropout_rate : float
+            Dropout rate.
+
+        Returns
+        -------
+        torch.sparse.Tensor
+            Sparse tensor with dropout applied.
+        """
         neighborhood = neighborhood.coalesce()
         return torch.sparse_coo_tensor(
             neighborhood.indices(),
@@ -24,6 +40,18 @@ def apply_dropout(self, neighborhood, dropout_rate: float):
 
 
 class _NodeToHyperedgeMessenger(MessagePassing, _AdjacencyDropoutMixin):
+    r"""Node to Hyperedge Messenger class.
+
+    Parameters
+    ----------
+    messaging_func : callable
+        Function for messaging from nodes to hyperedges.
+    adjacency_dropout : float, default = 0.7
+        Dropout rate for the adjacency matrix.
+    aggr_func : Literal["sum", "mean", "add"], default="sum"
+        Message aggregation function.
+    """
+
     def __init__(
         self,
         messaging_func,
@@ -35,9 +63,37 @@ def __init__(
         self.adjacency_dropout = adjacency_dropout
 
     def message(self, x_source):
+        r"""Message function.
+
+        Parameters
+        ----------
+        x_source : torch.Tensor
+            Source node features.
+
+        Returns
+        -------
+        torch.Tensor
+            Message passed from the source node to the hyperedge.
+        """
         return self.messaging_func(x_source)
 
     def forward(self, x_source, neighborhood):
+        r"""Forward computation.
+
+        Parameters
+        ----------
+        x_source : torch.Tensor
+            Source node features.
+        neighborhood : torch.sparse.Tensor
+            Sparse tensor representing the adjacency matrix.
+
+        Returns
+        -------
+        x_message_aggregated : torch.Tensor
+            Aggregated messages passed from the nodes to the hyperedge.
+        x_message : torch.Tensor
+            Messages passed from the nodes to the hyperedge.
+        """
         neighborhood = self.apply_dropout(neighborhood, self.adjacency_dropout)
 
         source_index_j, self.target_index_i = neighborhood.indices()
@@ -49,6 +105,18 @@ def forward(self, x_source, neighborhood):
 
 
 class _HyperedgeToNodeMessenger(MessagePassing, _AdjacencyDropoutMixin):
+    r"""Hyperedge to Node Messenger class.
+
+    Parameters
+    ----------
+    messaging_func : callable
+        Function for messaging from hyperedges to nodes.
+    adjacency_dropout : float, default = 0.7
+        Dropout rate for the adjacency matrix.
+    aggr_func : Literal["sum", "mean", "add"], default="sum"
+        Message aggregation function.
+    """
+
     def __init__(
         self,
         messaging_func,
@@ -60,6 +128,22 @@ def __init__(
         self.adjacency_dropout = adjacency_dropout
 
     def message(self, x_source, neighborhood, node_messages):
+        r"""Message function.
+
+        Parameters
+        ----------
+        x_source : torch.Tensor
+            Source hyperedge features.
+        neighborhood : torch.sparse.Tensor
+            Sparse tensor representing the adjacency matrix.
+        node_messages : torch.Tensor
+            Messages passed from the nodes to the hyperedge.
+
+        Returns
+        -------
+        torch.Tensor
+            Message passed from the hyperedge to the nodes.
+        """
         hyperedge_neighborhood = self.apply_dropout(
             neighborhood, self.adjacency_dropout
         )
@@ -71,6 +155,22 @@ def message(self, x_source, neighborhood, node_messages):
         return self.messaging_func(x_source, node_messages_aggregated)
 
     def forward(self, x_source, neighborhood, node_messages):
+        r"""Forward computation.
+
+        Parameters
+        ----------
+        x_source : torch.Tensor
+            Source hyperedge features.
+        neighborhood : torch.sparse.Tensor
+            Sparse tensor representing the adjacency matrix.
+        node_messages : torch.Tensor
+            Messages passed from the nodes to the hyperedge.
+
+        Returns
+        -------
+        torch.Tensor
+            Aggregated messages passed from the hyperedge to the nodes.
+ """ x_message = self.message(x_source, neighborhood, node_messages) neighborhood = self.apply_dropout(neighborhood, self.adjacency_dropout) @@ -80,19 +180,63 @@ def forward(self, x_source, neighborhood, node_messages): class _DefaultHyperedgeToNodeMessagingFunc(nn.Module): + r"""Default hyperedge to node messaging function. + + Parameters + ---------- + in_channels : int + Dimension of the input features. + """ + def __init__(self, in_channels) -> None: super().__init__() self.linear = nn.Linear(2 * in_channels, in_channels) def forward(self, x_1, m_0): + r"""Forward computation. + + Parameters + ---------- + x_1 : torch.Tensor + Input hyperedge features. + m_0 : torch.Tensor + Aggregated messages from the nodes. + + Returns + ------- + torch.Tensor + Messages passed from the hyperedge to the nodes. + """ return F.sigmoid(self.linear(torch.cat((x_1, m_0), dim=1))) class _DefaultUpdatingFunc(nn.Module): + r"""Default updating function. + + Parameters + ---------- + in_channels : int + Dimension of the input features. + """ + def __init__(self, in_channels) -> None: super().__init__() def forward(self, x, m): + r"""Forward computation. + + Parameters + ---------- + x : torch.Tensor + Input features. + m : torch.Tensor + Messages passed from the neighbors. + + Returns + ------- + torch.Tensor + Updated features. + """ return F.sigmoid(x + m) @@ -140,6 +284,8 @@ class HMPNNLayer(nn.Module): The final function or nn.Module object to be called on node and hyperedge features to retrieve their new representation. If not given, a linear layer is applied, received message is added and sigmoid is called. + **kwargs : optional + Additional arguments for the layer modules. References ---------- @@ -158,6 +304,7 @@ def __init__( aggr_func: Literal["sum", "mean", "add"] = "sum", updating_dropout: float = 0.5, updating_func=None, + **kwargs, ) -> None: super().__init__() @@ -188,6 +335,16 @@ def apply_regular_dropout(self, x): Unmasked features in a vector are scaled by d+k / d in which k is the number of masked features in the vector and d is the total number of features. + + Parameters + ---------- + x : torch.Tensor + Input features. + + Returns + ------- + torch.Tensor + Output features. """ if self.training: mask = self.dropout.sample(x.shape).to(dtype=torch.float, device=x.device) diff --git a/topomodelx/nn/hypergraph/hnhn.py b/topomodelx/nn/hypergraph/hnhn.py index 6d8c0dc6..ba4fb878 100644 --- a/topomodelx/nn/hypergraph/hnhn.py +++ b/topomodelx/nn/hypergraph/hnhn.py @@ -20,6 +20,8 @@ class HNHN(torch.nn.Module): Number of HNHN message passing layers. layer_drop : float, default = 0.2 Dropout rate for the hidden features. + **kwargs : optional + Additional arguments for the inner layers. References ---------- @@ -30,7 +32,13 @@ class HNHN(torch.nn.Module): """ def __init__( - self, in_channels, hidden_channels, incidence_1, n_layers=2, layer_drop=0.2 + self, + in_channels, + hidden_channels, + incidence_1, + n_layers=2, + layer_drop=0.2, + **kwargs, ): super().__init__() @@ -39,6 +47,7 @@ def __init__( in_channels=in_channels if i == 0 else hidden_channels, hidden_channels=hidden_channels, incidence_1=incidence_1, + **kwargs, ) for i in range(n_layers) ) diff --git a/topomodelx/nn/hypergraph/hnhn_layer.py b/topomodelx/nn/hypergraph/hnhn_layer.py index f8008434..94d9e3ff 100644 --- a/topomodelx/nn/hypergraph/hnhn_layer.py +++ b/topomodelx/nn/hypergraph/hnhn_layer.py @@ -39,6 +39,8 @@ class HNHNLayer(torch.nn.Module): Gain for the bias initialization. 
     bias_init : Literal["xavier_uniform", "xavier_normal"], default="xavier_uniform"
         Controls the bias initialization method.
+    **kwargs : optional
+        Additional arguments for the layer modules.
 
     Notes
     -----
@@ -69,6 +71,7 @@ def __init__(
         beta: float = -0.5,
         bias_gain: float = 1.414,
         bias_init: Literal["xavier_uniform", "xavier_normal"] = "xavier_uniform",
+        **kwargs,
     ) -> None:
         super().__init__()
         self.use_bias = use_bias
@@ -184,7 +187,7 @@ def forward(self, x_0, incidence_1=None):
         ----------
         x_0 : torch.Tensor, shape = (n_nodes, channels_node)
             Input features on the hypernodes.
-        incidence_1: torch.Tensor, shape = (n_nodes, n_edges)
+        incidence_1 : torch.Tensor, shape = (n_nodes, n_edges)
             Incidence matrix mapping edges to nodes (B_1).
 
         Returns
diff --git a/topomodelx/nn/hypergraph/hypergat.py b/topomodelx/nn/hypergraph/hypergat.py
index e8449cc7..02d480c7 100644
--- a/topomodelx/nn/hypergraph/hypergat.py
+++ b/topomodelx/nn/hypergraph/hypergat.py
@@ -16,8 +16,10 @@ class HyperGAT(torch.nn.Module):
         Dimension of the hidden features.
     n_layers : int, default = 2
         Number of message passing layers.
-    layer_drop: float, default = 0.2
+    layer_drop : float, default = 0.2
         Dropout rate for the hidden features.
+    **kwargs : optional
+        Additional arguments for the inner layers.
 
     References
     ----------
@@ -32,6 +34,7 @@ def __init__(
         hidden_channels,
         n_layers=2,
         layer_drop=0.2,
+        **kwargs,
     ):
         super().__init__()
 
@@ -39,6 +42,7 @@ def __init__(
             HyperGATLayer(
                 in_channels=in_channels if i == 0 else hidden_channels,
                 hidden_channels=hidden_channels,
+                **kwargs,
             )
             for i in range(n_layers)
         )
@@ -49,8 +53,8 @@ def forward(self, x_0, incidence_1):
 
         Parameters
         ----------
-        x_1 : torch.Tensor, shape = (n_edges, channels_edge)
-            Edge features.
+        x_0 : torch.Tensor, shape = (n_nodes, channels_nodes)
+            Node features.
         incidence_1 : torch.Tensor, shape = (n_nodes, n_edges)
             Boundary matrix of rank 1.
diff --git a/topomodelx/nn/hypergraph/hypergat_layer.py b/topomodelx/nn/hypergraph/hypergat_layer.py
index c41bb4b7..6f303aa5 100644
--- a/topomodelx/nn/hypergraph/hypergat_layer.py
+++ b/topomodelx/nn/hypergraph/hypergat_layer.py
@@ -21,6 +21,8 @@ class HyperGATLayer(MessagePassing):
         Initialization method.
     initialization_gain : float, default=1.414
         Gain for the initialization.
+    **kwargs : optional
+        Additional arguments for the layer modules.
 
     References
     ----------
@@ -36,6 +38,7 @@ def __init__(
         update_func: str = "relu",
         initialization: Literal["xavier_uniform", "xavier_normal"] = "xavier_uniform",
         initialization_gain: float = 1.414,
+        **kwargs,
     ) -> None:
         super().__init__(
             initialization=initialization, initialization_gain=initialization_gain
@@ -152,7 +155,7 @@ def update(self, x_message_on_target):
             return torch.nn.functional.relu(x_message_on_target)
         return None
 
-    def forward(self, x_source, incidence):
+    def forward(self, x_0, incidence_1):
         r"""Forward pass.
 
         .. math::
@@ -167,9 +170,9 @@ def forward(self, x_source, incidence):
 
         Parameters
         ----------
-        x_source : torch.Tensor
+        x_0 : torch.Tensor
             Input features.
-        incidence : torch.sparse
+        incidence_1 : torch.sparse
             Incidence matrix between nodes and hyperedges.
 
         Returns
         -------
@@ -179,30 +182,30 @@ def forward(self, x_source, incidence):
         x_1 : torch.Tensor
             Output hyperedge features.
""" - intra_aggregation = incidence.t() @ (x_source @ self.weight1) + intra_aggregation = incidence_1.t() @ (x_0 @ self.weight1) - self.target_index_i, self.source_index_j = incidence.indices() + self.target_index_i, self.source_index_j = incidence_1.indices() attention_values = self.attention(intra_aggregation).squeeze() incidence_with_attention = torch.sparse_coo_tensor( - indices=incidence.indices(), - values=incidence.values() * attention_values, - size=incidence.shape, + indices=incidence_1.indices(), + values=incidence_1.values() * attention_values, + size=incidence_1.shape, ) intra_aggregation_with_attention = incidence_with_attention.t() @ ( - x_source @ self.weight1 + x_0 @ self.weight1 ) messages_on_edges = self.update(intra_aggregation_with_attention) - inter_aggregation = incidence @ (messages_on_edges @ self.weight2) + inter_aggregation = incidence_1 @ (messages_on_edges @ self.weight2) attention_values = self.attention( inter_aggregation, intra_aggregation ).squeeze() incidence_with_attention = torch.sparse_coo_tensor( - indices=incidence.indices(), - values=attention_values * incidence.values(), - size=incidence.shape, + indices=incidence_1.indices(), + values=attention_values * incidence_1.values(), + size=incidence_1.shape, ) inter_aggregation_with_attention = incidence_with_attention @ ( messages_on_edges @ self.weight2 diff --git a/topomodelx/nn/hypergraph/hypersage.py b/topomodelx/nn/hypergraph/hypersage.py index a9744225..588461ab 100644 --- a/topomodelx/nn/hypergraph/hypersage.py +++ b/topomodelx/nn/hypergraph/hypersage.py @@ -14,10 +14,12 @@ class HyperSAGE(torch.nn.Module): Dimension of the input features. hidden_channels : int Dimension of the hidden features. - n_layer : int, default = 2 + n_layers : int, default = 2 Amount of message passing layers. alpha : int, default = -1 - Max number of nodes in a neighborhood to consider. If -1 it considers all the nodes.รน + Max number of nodes in a neighborhood to consider. If -1 it considers all the nodes. + **kwargs : optional + Additional arguments for the inner layers. References ---------- @@ -39,14 +41,14 @@ def __init__(self, in_channels, hidden_channels, n_layers=2, alpha=-1, **kwargs) for i in range(n_layers) ) - def forward(self, x_0, incidence): + def forward(self, x_0, incidence_1): """Forward computation through layers, then linear layer, then global max pooling. Parameters ---------- - x : torch.Tensor, shape = (n_nodes, features_nodes) + x_0 : torch.Tensor, shape = (n_nodes, features_nodes) Edge features. - incidence : torch.Tensor, shape = (n_nodes, n_edges) + incidence_1 : torch.Tensor, shape = (n_nodes, n_edges) Boundary matrix of rank 1. Returns @@ -55,6 +57,6 @@ def forward(self, x_0, incidence): Label assigned to whole complex. """ for layer in self.layers: - x_0 = layer.forward(x_0, incidence) + x_0 = layer.forward(x_0, incidence_1) return x_0 diff --git a/topomodelx/nn/hypergraph/hypersage_layer.py b/topomodelx/nn/hypergraph/hypersage_layer.py index d296569b..3b114497 100644 --- a/topomodelx/nn/hypergraph/hypersage_layer.py +++ b/topomodelx/nn/hypergraph/hypersage_layer.py @@ -61,6 +61,8 @@ class HyperSAGELayer(MessagePassing): Initialization method. device : str, default="cpu" Device name to train layer on. + **kwargs : optional + Additional arguments for the layer modules. 
 
     References
     ----------
@@ -87,6 +89,7 @@ def __init__(
             "uniform", "xavier_uniform", "xavier_normal"
         ] = "uniform",
         device: str = "cpu",
+        **kwargs,
     ) -> None:
         super().__init__(
             initialization=initialization,
@@ -191,6 +194,18 @@ def forward(self, x: torch.Tensor, incidence: torch.Tensor):  # type: ignore[ove
         """
 
         def nodes_per_edge(e):
+            r"""Get nodes per edge.
+
+            Parameters
+            ----------
+            e : int
+                Edge index.
+
+            Returns
+            -------
+            torch.Tensor
+                Nodes per edge.
+            """
             messages = (
                 torch.index_select(
                     input=incidence.to("cpu"), dim=1, index=torch.LongTensor([e])
@@ -204,6 +219,18 @@ def nodes_per_edge(e):
             return messages[torch.randperm(len(messages))[: self.alpha]]
 
         def edges_per_node(v):
+            r"""Get edges per node.
+
+            Parameters
+            ----------
+            v : int
+                Node index.
+
+            Returns
+            -------
+            torch.Tensor
+                Edges per node.
+            """
             return (
                 torch.index_select(
                     input=incidence.to("cpu"), dim=0, index=torch.LongTensor([v])
diff --git a/topomodelx/nn/hypergraph/unigcn.py b/topomodelx/nn/hypergraph/unigcn.py
index 0852fb23..58998943 100644
--- a/topomodelx/nn/hypergraph/unigcn.py
+++ b/topomodelx/nn/hypergraph/unigcn.py
@@ -16,6 +16,8 @@ class UniGCN(torch.nn.Module):
         Dimension of the hidden features.
     n_layers : int, default = 2
         Number of message passing layers.
+    **kwargs : optional
+        Additional arguments for the inner layers.
 
     References
     ----------
@@ -30,6 +32,7 @@ def __init__(
         in_channels,
         hidden_channels,
         n_layers=2,
+        **kwargs,
     ):
         super().__init__()
 
@@ -37,6 +40,7 @@ def __init__(
             UniGCNLayer(
                 in_channels=in_channels if i == 0 else hidden_channels,
                 hidden_channels=hidden_channels,
+                **kwargs,
             )
             for i in range(n_layers)
         )
diff --git a/topomodelx/nn/hypergraph/unigcn_layer.py b/topomodelx/nn/hypergraph/unigcn_layer.py
index 84a402ee..6b2dded1 100644
--- a/topomodelx/nn/hypergraph/unigcn_layer.py
+++ b/topomodelx/nn/hypergraph/unigcn_layer.py
@@ -16,10 +16,12 @@ class UniGCNLayer(torch.nn.Module):
         Dimension of the input features.
     hidden_channels : int
         Dimension of the hidden features.
-    use_bn : bool, default=False
-        Whether to use bathnorm after the linear transformation.
     aggr_norm : bool, default=False
         Whether to normalize the aggregated message by the neighborhood size.
+    use_bn : bool, default=False
+        Whether to use batch norm after the linear transformation.
+    **kwargs : optional
+        Additional arguments for the layer modules.
 
     References
     ----------
@@ -41,6 +43,7 @@ def __init__(
         hidden_channels,
         aggr_norm: bool = False,
         use_bn: bool = False,
+        **kwargs,
     ) -> None:
         super().__init__()
diff --git a/topomodelx/nn/hypergraph/unigcnii.py b/topomodelx/nn/hypergraph/unigcnii.py
index 47d3673c..6aed07c8 100644
--- a/topomodelx/nn/hypergraph/unigcnii.py
+++ b/topomodelx/nn/hypergraph/unigcnii.py
@@ -28,6 +28,8 @@ class UniGCNII(torch.nn.Module):
         Dropout rate for the hidden features.
     use_norm : bool, default=False
         Whether to apply row normalization after every layer.
+    **kwargs : optional
+        Additional arguments for the inner layers.
 
     References
     ----------
@@ -47,6 +49,7 @@ def __init__(
         input_drop=0.2,
         layer_drop=0.2,
         use_norm=False,
+        **kwargs,
     ):
         super().__init__()
         layers = []
@@ -65,6 +68,7 @@ def __init__(
                     alpha=alpha,
                     beta=beta,
                     use_norm=use_norm,
+                    **kwargs,
                 )
             )
diff --git a/topomodelx/nn/hypergraph/unigcnii_layer.py b/topomodelx/nn/hypergraph/unigcnii_layer.py
index a0b06700..f6c5886b 100644
--- a/topomodelx/nn/hypergraph/unigcnii_layer.py
+++ b/topomodelx/nn/hypergraph/unigcnii_layer.py
@@ -20,6 +20,8 @@ class UniGCNIILayer(torch.nn.Module):
         The beta parameter determining the importance of the learned matrix (\theta_1).
     use_norm : bool, default=False
         Whether to apply row normalization after the layer.
+    **kwargs : optional
+        Additional arguments for the layer modules.
 
     References
     ----------
@@ -30,7 +32,13 @@ class UniGCNIILayer(torch.nn.Module):
     """
 
     def __init__(
-        self, in_channels, hidden_channels, alpha: float, beta: float, use_norm=False
+        self,
+        in_channels,
+        hidden_channels,
+        alpha: float,
+        beta: float,
+        use_norm=False,
+        **kwargs,
     ) -> None:
         super().__init__()
 
@@ -86,7 +94,7 @@ def forward(self, x_0, incidence_1, x_skip=None):
         incidence_1 : torch.Tensor, shape = (num_nodes, num_edges)
             Incidence matrix of the hypergraph. It is expected that the incidence
             matrix contains self-loops for all nodes.
-        x_skip : torch.Tensor, shape = (num_nodes, in_channels), optional
+        x_skip : torch.Tensor, shape = (num_nodes, in_channels)
             Original node features of the hypergraph used for the skip connections.
             If not provided, the input to the layer is used as a skip connection.
 
@@ -96,7 +104,6 @@ def forward(self, x_0, incidence_1, x_skip=None):
             Output node features.
         x_1 : torch.Tensor
             Output hyperedge features.
-
         """
         x_skip = x_0 if x_skip is None else x_skip
         incidence_1_transpose = incidence_1.transpose(0, 1)
diff --git a/topomodelx/nn/hypergraph/unigin.py b/topomodelx/nn/hypergraph/unigin.py
index d9549ada..1c0cd0a3 100644
--- a/topomodelx/nn/hypergraph/unigin.py
+++ b/topomodelx/nn/hypergraph/unigin.py
@@ -26,7 +26,8 @@ class UniGIN(torch.nn.Module):
         Whether to make eps a trainable parameter.
     use_norm : bool, default=False
         Whether to apply row normalization after every layer.
-
+    **kwargs : optional
+        Additional arguments for the inner layers.
 
     References
     ----------
@@ -46,6 +47,7 @@ def __init__(
         eps=0,
         train_eps=False,
         use_norm=False,
+        **kwargs,
     ):
         super().__init__()
 
@@ -60,6 +62,7 @@ def __init__(
                 eps=eps,
                 train_eps=train_eps,
                 use_norm=use_norm,
+                **kwargs,
             )
             for _ in range(n_layers)
         )
diff --git a/topomodelx/nn/hypergraph/unigin_layer.py b/topomodelx/nn/hypergraph/unigin_layer.py
index e92202ac..29686cc5 100644
--- a/topomodelx/nn/hypergraph/unigin_layer.py
+++ b/topomodelx/nn/hypergraph/unigin_layer.py
@@ -19,7 +19,8 @@ class UniGINLayer(torch.nn.Module):
         Whether to make eps a trainable parameter.
     use_norm : bool, default=False
         Whether to apply row normalization after the layer.
-
+    **kwargs : optional
+        Additional arguments for the layer modules.
 
     References
     ----------
@@ -41,6 +42,7 @@ def __init__(
         eps: float = 0.0,
         train_eps: bool = False,
         use_norm: bool = False,
+        **kwargs,
     ) -> None:
         super().__init__()
diff --git a/topomodelx/nn/hypergraph/unisage.py b/topomodelx/nn/hypergraph/unisage.py
index a43b89dc..4b2cce19 100644
--- a/topomodelx/nn/hypergraph/unisage.py
+++ b/topomodelx/nn/hypergraph/unisage.py
@@ -26,9 +26,10 @@ class UniSAGE(torch.nn.Module):
         Aggregator function for hyperedges.
     v_aggr : Literal["sum", "mean",], default="mean"
         Aggregator function for nodes.
-    use_norm : boolean
+    use_norm : bool
         Whether to apply row normalization after every layer.
-
+    **kwargs : optional
+        Additional arguments for the inner layers.
 
     References
     ----------
@@ -54,6 +55,7 @@ def __init__(
             "mean",
         ] = "mean",
         use_norm: bool = False,
+        **kwargs,
     ):
         super().__init__()
 
@@ -67,6 +69,7 @@ def __init__(
                 e_aggr=e_aggr,
                 v_aggr=v_aggr,
                 use_norm=use_norm,
+                **kwargs,
             )
             for i in range(n_layers)
         )
diff --git a/topomodelx/nn/hypergraph/unisage_layer.py b/topomodelx/nn/hypergraph/unisage_layer.py
index 58e56545..dffdb990 100644
--- a/topomodelx/nn/hypergraph/unisage_layer.py
+++ b/topomodelx/nn/hypergraph/unisage_layer.py
@@ -20,8 +20,10 @@ class UniSAGELayer(torch.nn.Module):
         Aggregator function for hyperedges.
     v_aggr : Literal["sum", "mean",], default="mean"
         Aggregator function for nodes.
-    use_norm : boolean
+    use_norm : bool
         Whether to apply row normalization after every layer.
+    **kwargs : optional
+        Additional arguments for the layer modules.
 
     References
     ----------
@@ -38,6 +40,13 @@ class UniSAGELayer(torch.nn.Module):
     """
 
     def _validate_aggr(self, aggr):
+        r"""Validate aggregator.
+
+        Parameters
+        ----------
+        aggr : str
+            Aggregator function.
+        """
        if aggr not in {
             "sum",
             "mean",
@@ -59,6 +68,7 @@ def __init__(
             "mean",
         ] = "mean",
         use_norm: bool = False,
+        **kwargs,
     ) -> None:
         super().__init__()
         self.in_channels = in_channels
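
Reviewer note: a minimal usage sketch of the main change in this patch, namely that the network wrappers now forward unrecognized keyword arguments to their inner layers. It assumes a local TopoModelX checkout with this diff applied; the toy incidence matrix and all dimensions are illustrative only.

import torch

from topomodelx.nn.hypergraph.unigcn import UniGCN

n_nodes, n_hyperedges, in_channels = 8, 3, 4

# Toy incidence matrix B_1: rows index nodes, columns index hyperedges.
indices = torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7], [0, 0, 0, 1, 1, 2, 2, 2]])
incidence_1 = torch.sparse_coo_tensor(
    indices, torch.ones(indices.shape[1]), (n_nodes, n_hyperedges)
)
x_0 = torch.randn(n_nodes, in_channels)

# `use_bn` and `aggr_norm` are UniGCNLayer options, not UniGCN parameters;
# after this patch they reach every layer through **kwargs instead of
# raising a TypeError in UniGCN.__init__.
model = UniGCN(
    in_channels=in_channels, hidden_channels=16, n_layers=2, use_bn=True, aggr_norm=True
)
out = model(x_0, incidence_1)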
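The forward-argument renames in this patch (x / x_source / incidence becoming x_0 / incidence_1) are a breaking change for keyword callers, while positional calls are unaffected. A sketch, reusing the tensors from the previous snippet and assuming the two-tensor return documented in the HyperGATLayer docstring above:

from topomodelx.nn.hypergraph.hypergat_layer import HyperGATLayer

layer = HyperGATLayer(in_channels=in_channels, hidden_channels=16)

# Before this patch: layer.forward(x_source=x_0, incidence=incidence_1)
x_0_out, x_1_out = layer.forward(x_0=x_0, incidence_1=incidence_1)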
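For reference, the two private defaults now documented in hmpnn_layer.py compute simple closed forms: the hyperedge-to-node message is sigmoid(W [x_1 ; m_0]) and the default update is sigmoid(x + m). A standalone sketch mirroring those forward bodies, with illustrative dimensions:

import torch
import torch.nn.functional as F
from torch import nn

in_channels = 4
linear = nn.Linear(2 * in_channels, in_channels)

x_1 = torch.randn(3, in_channels)  # hyperedge features
m_0 = torch.randn(3, in_channels)  # aggregated node messages

# _DefaultHyperedgeToNodeMessagingFunc: sigmoid of a linear map over [x_1 ; m_0].
message = F.sigmoid(linear(torch.cat((x_1, m_0), dim=1)))
# _DefaultUpdatingFunc: sigmoid of features plus received message.
updated = F.sigmoid(x_1 + message)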