Create your Explainable Model#

Here you will find the required steps to build your explainable model from your original architecture.

Tip

You can retrieve the model's expected input size and target size from the FittedSchema object, via its input_size and target_size attributes. Each size includes the batch dimension as its first dimension.

For instance, with the previous fitted schema built along with the fitted_train_dataset:

input_size = fitted_train_dataset.fitted_schema.input_size  # (1, 2), as it includes the batch dimension
target_size = fitted_train_dataset.fitted_schema.target_size  # (1, 3), as it includes the batch dimension

1. Get your Original PyTorch Model#

Let's consider the following basic multi-layer perceptron architecture, with an output of size 3, as your original model.

import torch.nn.functional as F
import torch.nn as nn

class SimpleMLP(nn.Module):
    def __init__(self, input_size, hidden_size1, hidden_size2, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.fc2 = nn.Linear(hidden_size1, hidden_size2)
        self.fc3 = nn.Linear(hidden_size2, output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Instantiate the model
input_size = 2  # Example input size
hidden_size1 = 64
hidden_size2 = 32
output_size = 3  # Example output size

model = SimpleMLP(input_size, hidden_size1, hidden_size2, output_size)
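
As a quick sanity check (illustrative only, using a random batch), a forward pass confirms the expected output size:

import torch

# Forward a dummy batch of 4 samples through the original model.
dummy_batch = torch.randn(4, input_size)  # input_size = 2
output = model(dummy_batch)
print(output.shape)  # torch.Size([4, 3])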

2. Convert your Original Model to an XpdeepModel#

Here, you need to convert your own model architecture into the explainable architecture.

We want to add a backbone model, which will be responsible for projecting your input data into a better embedding space for your task.

import torch.nn.functional as F
import torch.nn as nn

class BackboneModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 64):
        super().__init__()
        self.fc = nn.Linear(input_size, hidden_size)

    def forward(self, x):
        x = F.relu(self.fc(x))
        return x
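
For instance (with a dummy batch, for illustration only), the backbone maps each 2-feature sample into a 64-dimensional embedding space:

import torch

backbone = BackboneModel(input_size=2, hidden_size=64)
embedding = backbone(torch.randn(4, 2))  # dummy batch of 4 samples
print(embedding.shape)  # torch.Size([4, 64])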
👀 Full file preview
from prepare_dataset import fitted_train_dataset
from torch import nn
from torch.nn.functional import relu

from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel

model_input_size = fitted_train_dataset.fitted_schema.input_size  # (1, 2), as it includes the batch dimension
model_target_size = fitted_train_dataset.fitted_schema.target_size  # (1, 3), as it includes the batch dimension


class BackboneModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 64):
        super().__init__()
        self.fc = nn.Linear(input_size, hidden_size)

    def forward(self, x):
        return relu(self.fc(x))


class FeatureExtractionModel(nn.Module):
    def __init__(self, input_size: int, hidden_size1: int = 32, output_size: int = 16):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.fc2 = nn.Linear(hidden_size1, output_size)

    def forward(self, x):
        x = relu(self.fc1(x))
        return relu(self.fc2(x))


class TaskLearnerModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 8, output_size: int = 3):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.activation = nn.ReLU()
        self.output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.activation(x)
        return self.output(x)


build_configuration = ModelDecisionGraphParameters(
    graph_depth=3,
    target_homogeneity_pruning_threshold=0.9,
    population_pruning_threshold=0.1,
    prune_step=5,
    target_homogeneity_weight=0.2,
    discrimination_weight=0.2,
    balancing_weight=0.2,
)

xpdeep_model = XpdeepModel.from_torch(
    fitted_schema=fitted_train_dataset.fitted_schema,
    feature_extraction=FeatureExtractionModel(input_size=64, output_size=16),
    task_learner=TaskLearnerModel(input_size=16, output_size=model_target_size[1]),
    backbone=BackboneModel(input_size=model_input_size[1], hidden_size=64),
    decision_graph_parameters=build_configuration,
)

Now, you need to adapt your original model SimpleMLP into a feature-extraction model and a task-learner model.

Here, please ensure that:

  1. The backbone model output size is compatible with the feature-extraction model input size.
  2. The feature-extraction model output size is compatible with the task-learner model input size.

A quick shape sanity check is sketched after the code below.
import torch.nn.functional as F
import torch.nn as nn

class FeatureExtractionModel(nn.Module):
    def __init__(self, input_size: int, hidden_size1: int = 32, output_size: int = 16):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.fc2 = nn.Linear(hidden_size1, output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x


class TaskLearnerModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 8, output_size: int = 3):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.activation = nn.ReLU()
        self.output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.activation(x)
        return self.output(x)
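
To verify both constraints, you can chain the three sub-models on a dummy batch (a quick sanity check with random data, not part of the Xpdeep API):

import torch

# Dummy batch of 4 samples, each with 2 features, matching the schema's
# per-sample input size (2,).
dummy_batch = torch.randn(4, 2)

backbone = BackboneModel(input_size=2, hidden_size=64)
feature_extraction = FeatureExtractionModel(input_size=64, output_size=16)
task_learner = TaskLearnerModel(input_size=16, output_size=3)

embedding = backbone(dummy_batch)          # (4, 64): feature-extraction input
features = feature_extraction(embedding)   # (4, 16): task-learner input
predictions = task_learner(features)       # (4, 3): matches the target size
print(embedding.shape, features.shape, predictions.shape)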
👀 Full file preview
from prepare_dataset import fitted_train_dataset
from torch import nn
from torch.nn.functional import relu

from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel

model_input_size = fitted_train_dataset.fitted_schema.input_size  # (1, 2), as it includes the batch dimension
model_target_size = fitted_train_dataset.fitted_schema.target_size  # (1, 3), as it includes the batch dimension


class BackboneModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 64):
        super().__init__()
        self.fc = nn.Linear(input_size, hidden_size)

    def forward(self, x):
        return relu(self.fc(x))


class FeatureExtractionModel(nn.Module):
    def __init__(self, input_size: int, hidden_size1: int = 32, output_size: int = 16):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.fc2 = nn.Linear(hidden_size1, output_size)

    def forward(self, x):
        x = relu(self.fc1(x))
        return relu(self.fc2(x))


class TaskLearnerModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 8, output_size: int = 3):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.activation = nn.ReLU()
        self.output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.activation(x)
        return self.output(x)


build_configuration = ModelDecisionGraphParameters(
    graph_depth=3,
    target_homogeneity_pruning_threshold=0.9,
    population_pruning_threshold=0.1,
    prune_step=5,
    target_homogeneity_weight=0.2,
    discrimination_weight=0.2,
    balancing_weight=0.2,
)

xpdeep_model = XpdeepModel.from_torch(
    fitted_schema=fitted_train_dataset.fitted_schema,
    feature_extraction=FeatureExtractionModel(input_size=64, output_size=16),
    task_learner=TaskLearnerModel(input_size=16, output_size=model_target_size[1]),
    backbone=BackboneModel(input_size=model_input_size[1], hidden_size=64),
    decision_graph_parameters=build_configuration,
)

3. Explainable Model Specifications#

XpdeepModel hyperparameters are stored under the ModelDecisionGraphParameters configuration class. These hyperparameters specify the model's characteristics, its graph structure and complexity, as well as some of its learning parameters.

As with a standard deep model, XpdeepModel hyperparameters can either be left at their default values or adjusted according to the dataset complexity and the task at hand.

from xpdeep.model.model_builder import ModelDecisionGraphParameters

build_configuration = ModelDecisionGraphParameters(
    graph_depth=3,
    target_homogeneity_weight=0.2,
    discrimination_weight=0.2,
    balancing_weight=0.2,
    target_homogeneity_pruning_threshold=0.9,
    population_pruning_threshold=0.1,
    prune_step=5,
)

The main parameters guiding the specification of the explainable XpdeepModel are:

  • graph_depth: This parameter defines the maximum depth of the decision graph. Increasing the value allows for more decision nodes, providing finer granularity in the decision-making process, but also adds to the complexity of the resulting graph. The depth should be set according to the desired balance between explanation detail and overall complexity. In a classification task, the default value is the logarithm of the number of classes, rounded up to the nearest integer. In a regression task, it is set to 3.

  • target_homogeneity_weight: This parameter controls the homogeneity of the target variable within graph nodes. In classification tasks, a higher value increases class purity within nodes, while in regression tasks, it reduces target variable variance. A high value, particularly greater than 1, may negatively impact the model's performance. A value of 0 disregards target variable homogeneity. By default, the value is set to 0.1.

  • discrimination_weight: This parameter controls the differentiation between the left and right input features at each node. Increasing this weight enhances the distinction between the left and right descriptive features, making the separation more pronounced. A high value, particularly greater than 1, may negatively impact the model's performance. A value of 0 disregards this discriminative constraint. By default, the value is set to 0.01.

  • balancing_weight: This parameter regulates the balance between the proportions of individuals sent left and right at each node. A high value, especially above 1, can negatively affect the model's performance. A value of 0 ignores this balancing. By default, the value is set to 0.

  • prune_step: Pruning is conducted every prune_step epochs. It varies within [1, max_epochs]. To disregard this parameter, set its value to None; this causes the target_homogeneity_pruning_threshold and population_pruning_threshold parameters to be ignored (see the sketch after this list).

  • target_homogeneity_pruning_threshold: To obtain a decision graph with an optimal structure, nodes that are sufficiently homogeneous are converted into leaves (i.e., pruned). Namely, if the homogeneity of the target variable within a node exceeds this threshold, it is converted into a leaf. A value of 1 ignores this criterion. By default, the value is set to 0.9. In particular:

    • in the classification task, homogeneity is measured by the proportion of the majority class within a node. This parameter varies within the range [max_prop, 1], where max_prop represents the proportion of the majority class in the training data.
    • in the regression task, homogeneity is linked to the variance of the target variable: the lower the variance, the greater the homogeneity. For a normalized target variable \(Y\), this parameter varies within the range [0, 1] and corresponds to \(1 - \operatorname{Var}(Y)\).
  • population_pruning_threshold: To obtain a decision graph with an optimal structure, nodes that involve very few individuals (i.e., not sufficiently represented) are converted into leaves. This parameter, which lies within [0, 1], specifies the minimum proportion of individuals required in a node. If the proportion falls below this threshold, the node is pruned. To ignore this parameter, set its value to 0. By default, the value is set to 0.01.
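
For instance, here is a minimal sketch that disables pruning entirely, assuming the remaining hyperparameters fall back to their documented defaults:

from xpdeep.model.model_builder import ModelDecisionGraphParameters

# With prune_step=None, no pruning is performed, so the two pruning
# thresholds (target_homogeneity_pruning_threshold and
# population_pruning_threshold) are ignored.
no_pruning_configuration = ModelDecisionGraphParameters(
    graph_depth=3,
    prune_step=None,
)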

Please refer to the API reference for more information.

👀 Full file preview
from prepare_dataset import fitted_train_dataset
from torch import nn
from torch.nn.functional import relu

from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel

model_input_size = fitted_train_dataset.fitted_schema.input_size  # (1, 2), as it includes the batch dimension
model_target_size = fitted_train_dataset.fitted_schema.target_size  # (1, 3), as it includes the batch dimension


class BackboneModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 64):
        super().__init__()
        self.fc = nn.Linear(input_size, hidden_size)

    def forward(self, x):
        return relu(self.fc(x))


class FeatureExtractionModel(nn.Module):
    def __init__(self, input_size: int, hidden_size1: int = 32, output_size: int = 16):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.fc2 = nn.Linear(hidden_size1, output_size)

    def forward(self, x):
        x = relu(self.fc1(x))
        return relu(self.fc2(x))


class TaskLearnerModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 8, output_size: int = 3):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.activation = nn.ReLU()
        self.output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.activation(x)
        return self.output(x)


build_configuration = ModelDecisionGraphParameters(
    graph_depth=3,
    target_homogeneity_pruning_threshold=0.9,
    population_pruning_threshold=0.1,
    prune_step=5,
    target_homogeneity_weight=0.2,
    discrimination_weight=0.2,
    balancing_weight=0.2,
)

xpdeep_model = XpdeepModel.from_torch(
    fitted_schema=fitted_train_dataset.fitted_schema,
    feature_extraction=FeatureExtractionModel(input_size=64, output_size=16),
    task_learner=TaskLearnerModel(input_size=16, output_size=model_target_size[1]),
    backbone=BackboneModel(input_size=model_input_size[1], hidden_size=64),
    decision_graph_parameters=build_configuration,
)

4. Create the Explainable Model#

With the sub-models defined in section 2, you can finally instantiate the XpdeepModel.

Note

An additional parameter fitted_schema is required by torch.export to serialize the model.

It encapsulates the input size given to your XpdeepModel, without the batch dimension.

Therefore, input_size is either the BackboneModel input size (if provided), or the FeatureExtractionModel input size otherwise. Here, as we have a BackboneModel, the input size is (2,) and is inferred from the fitted schema.

This assumes you already have your fitted schema from your train dataset; otherwise, follow the tutorial here.
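
As a reminder, the schema sizes include the batch dimension first; dropping it gives the per-sample input size the note above refers to:

input_size = fitted_train_dataset.fitted_schema.input_size  # (1, 2), with the batch dimension first
per_sample_input_size = input_size[1:]  # (2,), the input size without the batch dimension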

from xpdeep.model.xpdeep_model import XpdeepModel

xpdeep_model = XpdeepModel.from_torch(
    fitted_schema=fitted_train_dataset.fitted_schema,
    feature_extraction=FeatureExtractionModel(input_size=64, output_size=16),
    task_learner=TaskLearnerModel(input_size=16, output_size=target_size[1]),
    backbone=BackboneModel(input_size=input_size[1], hidden_size=64),
    decision_graph_parameters=build_configuration
)
👀 Full file preview
from prepare_dataset import fitted_train_dataset
from torch import nn
from torch.nn.functional import relu

from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel

model_input_size = fitted_train_dataset.fitted_schema.input_size  # (1, 2), as it includes the batch dimension
model_target_size = fitted_train_dataset.fitted_schema.target_size  # (1, 3), as it includes the batch dimension


class BackboneModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 64):
        super().__init__()
        self.fc = nn.Linear(input_size, hidden_size)

    def forward(self, x):
        return relu(self.fc(x))


class FeatureExtractionModel(nn.Module):
    def __init__(self, input_size: int, hidden_size1: int = 32, output_size: int = 16):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.fc2 = nn.Linear(hidden_size1, output_size)

    def forward(self, x):
        x = relu(self.fc1(x))
        return relu(self.fc2(x))


class TaskLearnerModel(nn.Module):
    def __init__(self, input_size: int, hidden_size: int = 8, output_size: int = 3):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.activation = nn.ReLU()
        self.output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.activation(x)
        return self.output(x)


build_configuration = ModelDecisionGraphParameters(
    graph_depth=3,
    target_homogeneity_pruning_threshold=0.9,
    population_pruning_threshold=0.1,
    prune_step=5,
    target_homogeneity_weight=0.2,
    discrimination_weight=0.2,
    balancing_weight=0.2,
)

xpdeep_model = XpdeepModel.from_torch(
    fitted_schema=fitted_train_dataset.fitted_schema,
    feature_extraction=FeatureExtractionModel(input_size=64, output_size=16),
    task_learner=TaskLearnerModel(input_size=16, output_size=model_target_size[1]),
    backbone=BackboneModel(input_size=model_input_size[1], hidden_size=64),
    decision_graph_parameters=build_configuration,
)

With your explainable datasets, you can now train your model!