mlp

Classic MLP for the xpdeep API.

Classes:

| Name | Description |
| --- | --- |
| MLP | Convenient MLP model. |

MLP(input_size: int, hidden_channels: list[int], norm_layer: Callable[..., torch.nn.Module] | None = None, activation_layer: Callable[..., torch.nn.Module] | None = torch.nn.ReLU, dropout: float = 0.0, last_activation: partial[torch.nn.Module] | None = None, *, inplace: bool | None = None, bias: bool = True, flatten_input: bool = False)

Convenient MLP model.

Initialize a Multi-Layer Perceptron model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| input_size | int | Number of channels of the input. | required |
| hidden_channels | list[int] | List of the hidden channel dimensions. | required |
| norm_layer | Callable[..., Module] \| None | Norm layer that will be stacked on top of the linear layer. If None, this layer won't be used. | None |
| activation_layer | Callable[..., Module] \| None | Activation function, which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. | torch.nn.ReLU |
| inplace | bool \| None | Parameter for the activation layer, which can optionally do the operation in-place. Default is None, which uses the respective default values of the activation_layer and Dropout layer. | None |
| bias | bool | Whether to use bias in the linear layer. | True |
| dropout | float | The probability for the dropout layer. | 0.0 |
| last_activation | partial[Module] \| None | Last activation function. | None |
| flatten_input | bool | Whether to flatten the input or not. | False |
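
For reference, a minimal usage sketch (assuming MLP can be imported from xpdeep.model.zoo.mlp, as suggested by the source path below; the exact public import path may differ, and the configuration here is purely hypothetical):

```python
import torch

from xpdeep.model.zoo.mlp import MLP  # import path assumed from the source file location below

# Hypothetical configuration: flatten 28x28 inputs, two hidden blocks, 10 outputs.
model = MLP(
    input_size=28 * 28,
    hidden_channels=[128, 64, 10],
    norm_layer=torch.nn.BatchNorm1d,
    dropout=0.1,
    flatten_input=True,
)

x = torch.randn(32, 28, 28)  # batch of 32 samples
logits = model(x)            # the model applies its layers in sequence
print(logits.shape)          # torch.Size([32, 10])
```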

Methods:

| Name | Description |
| --- | --- |
| reset_parameters | Reset model parameters to get new values when copying it. |

Source code in src/xpdeep/model/zoo/mlp.py
def __init__(  # noqa: PLR0913, PLR0917
    self,
    input_size: int,
    hidden_channels: list[int],
    norm_layer: Callable[..., torch.nn.Module] | None = None,
    activation_layer: Callable[..., torch.nn.Module] | None = torch.nn.ReLU,
    dropout: float = 0.0,
    last_activation: partial[torch.nn.Module] | None = None,
    *,
    inplace: bool | None = None,
    bias: bool = True,
    flatten_input: bool = False,
) -> None:
    """Initialize a Multi-Layer Perceptron model.

    Parameters
    ----------
    input_size : int
        Number of channels of the input.
    hidden_channels : List[int]
        List of the hidden channel dimensions.
    norm_layer : Callable[..., torch.nn.Module] | None
        Norm layer that will be stacked on top of the linear layer. If ``None``, this layer won't be used.
    activation_layer : Callable[..., torch.nn.Module] | None, default torch.nn.ReLU
        Activation function, which will be stacked on top of the normalization layer (if not None), otherwise on top
        of the linear layer.
    inplace : bool | None, default None
        Parameter for the activation layer, which can optionally do the operation in-place.
        Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
    bias : bool, default True
        Whether to use bias in the linear layer.
    dropout : float, default 0
        The probability for the dropout layer.
    last_activation : partial[torch.nn.Module] | None, default None
        Last activation function.
    flatten_input : bool, default False
        Whether to flatten the input or not.
    """
    # The addition of `norm_layer` is inspired from the implementation of TorchMultimodal:
    # https://github.com/facebookresearch/multimodal/blob/5dec8a/torchmultimodal/modules/layers/mlp.py
    params = {} if inplace is None else {"inplace": inplace}

    layers: list[nn.Module] = []

    if flatten_input:
        layers.append(torch.nn.Flatten())
    in_dim = input_size
    for hidden_dim in hidden_channels[:-1]:
        layers.append(torch.nn.Linear(in_dim, hidden_dim, bias=bias))
        if norm_layer is not None:
            layers.append(norm_layer(hidden_dim))
        if activation_layer is not None:
            layers.append(activation_layer(**params))
        if dropout > 0:
            layers.append(torch.nn.Dropout(dropout, **params))
        in_dim = hidden_dim

    # Final linear layer maps to the last entry of `hidden_channels` (the output dimension).
    layers.append(torch.nn.Linear(in_dim, hidden_channels[-1], bias=bias))

    if last_activation is not None:
        layers.append(last_activation(**params))
    super().__init__(*layers)
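
For each entry in hidden_channels except the last, the loop above stacks Linear -> norm -> activation -> Dropout; the final entry only gets a Linear layer plus the optional last_activation. A small illustrative sketch of the resulting stack (assuming the class behaves like a torch.nn.Sequential, as the super().__init__(*layers) call suggests, and that it is importable from xpdeep.model.zoo.mlp):

```python
from functools import partial

import torch

from xpdeep.model.zoo.mlp import MLP  # import path assumed from the source file location

model = MLP(
    input_size=16,
    hidden_channels=[32, 4],
    norm_layer=torch.nn.BatchNorm1d,
    dropout=0.2,
    last_activation=partial(torch.nn.Softmax, dim=-1),
)
# Expected layer order:
#   Linear(16, 32) -> BatchNorm1d(32) -> ReLU -> Dropout(0.2) -> Linear(32, 4) -> Softmax(dim=-1)
print(model)
```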

reset_parameters() -> None

Reset model parameters to get new values when copying it.

Source code in src/xpdeep/model/zoo/mlp.py
def reset_parameters(self) -> None:
    """Reset model parameters to get new values when copying it."""
    for layer in self.children():
        if hasattr(layer, "reset_parameters"):
            layer.reset_parameters()
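
A short usage sketch for reset_parameters (hedged: copy.deepcopy on a torch module is standard, only direct child layers that define reset_parameters, such as Linear or BatchNorm1d, are re-initialized, and the import path is assumed from the source file location):

```python
import copy

import torch

from xpdeep.model.zoo.mlp import MLP  # import path assumed

model = MLP(input_size=8, hidden_channels=[16, 2])
clone = copy.deepcopy(model)  # the copy starts with identical weights
clone.reset_parameters()      # re-initialize the copy's Linear layers

# The original and the re-initialized copy should now differ.
orig_linear = next(m for m in model.children() if isinstance(m, torch.nn.Linear))
new_linear = next(m for m in clone.children() if isinstance(m, torch.nn.Linear))
print(torch.allclose(orig_linear.weight, new_linear.weight))  # almost certainly False
```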