doc

Define models used in documentation tutorials.

Classes:

EcgCNN: ECG model, backbone model for the ECG dataset.
GazPriceMLP: Gas price model, task_learner model for the gas price dataset.
MnistCNN: MNIST model, backbone model for the MNIST dataset.

EcgCNN(dropout: float = 0.0, output_size: int = 50, *, out_channels: tuple[int, ...] = (32, 64, 128), with_softmax: bool = False)

ECG model, backbone model for the ECG dataset.

Initialize a basic CNN model for classification.

Methods:

forward: Forward pass.

Source code in src/xpdeep/model/zoo/doc.py
def __init__(
    self,
    dropout: float = 0.0,
    output_size: int = 50,
    *,
    out_channels: tuple[int, ...] = (32, 64, 128),
    with_softmax: bool = False,
):
    """Initialize a basic CNN model for classification."""
    super().__init__(
        in_channels=1,
        out_channels=out_channels,
        output_size=output_size,
        dropout=dropout,
        with_softmax=with_softmax,
    )
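
A minimal construction sketch (not part of the generated reference): the import path is inferred from the "Source code in src/xpdeep/model/zoo/doc.py" note, and the keyword values below are illustrative only.

import torch
from xpdeep.model.zoo.doc import EcgCNN  # import path inferred from the documented source location

# Hypothetical configuration: light dropout, five output classes,
# and softmax probabilities instead of raw scores.
model = EcgCNN(dropout=0.1, output_size=5, with_softmax=True)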

forward(x: Tensor) -> Tensor

Forward pass.

Source code in src/xpdeep/model/zoo/doc.py
def forward(self, x: Tensor) -> Tensor:
    """Forward pass."""
    return super().forward(x.transpose(1, 2))
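
Since forward transposes dimensions 1 and 2 before delegating to the parent implementation, the model expects channel-last input. A hedged shape check, assuming the inherited backbone is a 1-D CNN over (batch, channels, length) tensors; batch size and sequence length are arbitrary:

import torch
from xpdeep.model.zoo.doc import EcgCNN  # inferred import path

model = EcgCNN(output_size=5)
x = torch.randn(8, 128, 1)  # (batch, length, channels): channel-last ECG windows
y = model(x)                # transpose(1, 2) feeds (batch, 1, length) to the backbone
print(y.shape)              # expected: torch.Size([8, 5])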

GazPriceMLP(input_size: int, hidden_channels: list[int], norm_layer: Callable[..., torch.nn.Module] | None = None, activation_layer: Callable[..., torch.nn.Module] | None = torch.nn.ReLU, dropout: float = 0.0, last_activation: partial[torch.nn.Module] | None = None, *, inplace: bool | None = None, bias: bool = True, flatten_input: bool = False)

Gas price model, task_learner model for the gas price dataset.

Methods:

forward: Forward pass of the model.

Source code in src/xpdeep/model/zoo/mlp.py
def __init__(  # noqa: PLR0913, PLR0917
    self,
    input_size: int,
    hidden_channels: list[int],
    norm_layer: Callable[..., torch.nn.Module] | None = None,
    activation_layer: Callable[..., torch.nn.Module] | None = torch.nn.ReLU,
    dropout: float = 0.0,
    last_activation: partial[torch.nn.Module] | None = None,
    *,
    inplace: bool | None = None,
    bias: bool = True,
    flatten_input: bool = False,
) -> None:
    """Initialize a Multi-Layer Perceptron model.

    Parameters
    ----------
    input_size : int
        Number of channels of the input.
    hidden_channels : list[int]
        List of the hidden channel dimensions; the last entry is the output dimension.
    norm_layer : Callable[..., torch.nn.Module] | None
        Norm layer that will be stacked on top of the linear layer. If ``None``, this layer won't be used.
    activation_layer : Callable[..., torch.nn.Module] | None, default torch.nn.ReLU
        Activation function, which will be stacked on top of the normalization layer (if not ``None``), otherwise
        on top of the linear layer.
    dropout : float, default 0.0
        The probability for the dropout layer.
    last_activation : partial[torch.nn.Module] | None, default None
        Last activation function, applied after the final linear layer.
    inplace : bool | None, default None
        Parameter for the activation layer, which can optionally do the operation in-place.
        Default is ``None``, which uses the respective default values of the ``activation_layer`` and dropout layer.
    bias : bool, default True
        Whether to use bias in the linear layers.
    flatten_input : bool, default False
        Whether to flatten the input or not.
    """
    # The addition of `norm_layer` is inspired from the implementation of TorchMultimodal:
    # https://github.com/facebookresearch/multimodal/blob/5dec8a/torchmultimodal/modules/layers/mlp.py
    params = {} if inplace is None else {"inplace": inplace}

    layers: list[nn.Module] = []

    if flatten_input:
        layers.append(torch.nn.Flatten())
    in_dim = input_size
    for hidden_dim in hidden_channels[:-1]:
        layers.append(torch.nn.Linear(in_dim, hidden_dim, bias=bias))
        if norm_layer is not None:
            layers.append(norm_layer(hidden_dim))
        if activation_layer is not None:
            layers.append(activation_layer(**params))
        if dropout > 0:
            layers.append(torch.nn.Dropout(dropout, **params))
        in_dim = hidden_dim

    # Final linear layer mapping to the last hidden dimension (the model output size).
    layers.append(torch.nn.Linear(in_dim, hidden_channels[-1], bias=bias))

    if last_activation is not None:
        layers.append(last_activation(**params))
    super().__init__(*layers)
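
The constructor builds one linear layer per entry of hidden_channels, with the optional norm, activation, and dropout inserted between hidden layers. A minimal construction sketch (import paths inferred from the documented source locations; all sizes illustrative):

import torch
from functools import partial

from xpdeep.model.zoo.doc import GazPriceMLP  # inferred import path

# Illustrative sizes only. The last entry is 5 because `forward` (below)
# reshapes the output to (-1, 5, 1).
model = GazPriceMLP(
    input_size=10,
    hidden_channels=[64, 32, 5],
    norm_layer=torch.nn.BatchNorm1d,         # inserted after each hidden Linear
    dropout=0.1,                             # Dropout after each hidden activation
    last_activation=partial(torch.nn.Tanh),  # wrapped in partial, per the signature
)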

forward(x: torch.Tensor) -> torch.Tensor

Forward pass of the model.

Source code in src/xpdeep/model/zoo/doc.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Forward pass of the model."""
    x = super().forward(x)
    return x.reshape(-1, 5, 1)
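
The reshape turns the flat MLP output into shape (batch, 5, 1). A quick shape check under the same illustrative assumptions as above:

import torch
from xpdeep.model.zoo.doc import GazPriceMLP  # inferred import path

model = GazPriceMLP(input_size=10, hidden_channels=[64, 32, 5])  # illustrative sizes
x = torch.randn(16, 10)  # (batch, input_size)
y = model(x)             # linear stack yields (16, 5), then reshape(-1, 5, 1)
print(y.shape)           # expected: torch.Size([16, 5, 1])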

MnistCNN(output_size: int, out_channels: tuple[int, ...] = (16, 32, 64))

MNIST model, backbone model for MNIST dataset.

Initialize a basic CNN model for classification.

Methods:

forward: Forward pass; MNIST is grayscale, so a channel dimension is added.

Source code in src/xpdeep/model/zoo/doc.py
def __init__(self, output_size: int, out_channels: tuple[int, ...] = (16, 32, 64)):
    """Initialize a basic CNN model for classification."""
    super().__init__(in_channels=1, out_channels=out_channels, output_size=output_size)
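
A minimal construction sketch (inferred import path; in_channels is fixed to 1 internally, so only the output size and channel stack are configurable):

import torch
from xpdeep.model.zoo.doc import MnistCNN  # inferred import path

# Ten output classes, one per MNIST digit.
model = MnistCNN(output_size=10)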

forward(x: torch.Tensor) -> torch.Tensor

Forward pass. MNIST is grayscale, so a channel dimension is added before the backbone.

Source code in src/xpdeep/model/zoo/doc.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """MNIST is grayscale."""
    return super().forward(x.unsqueeze(1))
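
Because forward unsqueezes dimension 1, inputs are plain grayscale images without a channel axis. A hedged shape check, assuming the inherited backbone is a 2-D CNN (batch size arbitrary):

import torch
from xpdeep.model.zoo.doc import MnistCNN  # inferred import path

model = MnistCNN(output_size=10)
x = torch.randn(32, 28, 28)  # (batch, height, width): MNIST images, no channel axis
y = model(x)                 # unsqueeze(1) produces (32, 1, 28, 28) for the backbone
print(y.shape)               # expected: torch.Size([32, 10])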