From a PyTorch model to a deep explainable model#
For a quick introduction to the Xpdeep APIs, this section demonstrates, on the Adult Income dataset, how to adapt the PyTorch code of a standard deep model in order to design an explainable deep model with Xpdeep.
We review the key steps of building a deep model, from architecture specification and training to explanation generation.
For each step in building a deep model, we provide:
- Tabs labeled "SOTA and Xpdeep" for code that is identical for both the SOTA deep model and the Xpdeep explainable model.
- Tabs labeled "Xpdeep" for code specific to the Xpdeep explainable model.
1. Project Setup#
Set Up API Key and URL#
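The Xpdeep client must first be pointed at your deployment. A minimal sketch, assuming the client exposes an init helper; the key and URL are placeholders to replace with your own credentials:
from xpdeep import init
# Hypothetical credentials: use the API key and instance URL provided to you.
init(api_key="your-api-key", api_url="https://your-xpdeep-instance.com")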
Create a Project#
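A project then groups the datasets, models, and explanations of one experiment. A sketch, assuming a Project object registered through set_project; the id and name are placeholders:
from xpdeep import set_project
from xpdeep.project import Project
# Hypothetical project identifiers for this tutorial.
set_project(Project(id="AdultIncomeTutorialId", name="Adult Income Tutorial"))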
2. Data Preparation#
Read Raw Data#
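Both pipelines start from the same raw dataframe. A minimal sketch, assuming a local CSV copy of the Adult Income dataset named "adult.csv" (the path is a placeholder):
import pandas as pd
# Load the raw Adult Income data into a dataframe.
data = pd.read_csv("adult.csv")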
Split Data#
from sklearn.model_selection import train_test_split
# Split the data into training and testing sets
train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)
# Further split the training set into training and validation sets
# (0.25 of the remaining 80% yields a 60/20/20 train/val/test split)
train_data, val_data = train_test_split(train_data, test_size=0.25, random_state=42)
Conversion to Parquet Format#
import pyarrow as pa
import pyarrow.parquet as pq
# Convert to pyarrow Table format
train_table = pa.Table.from_pandas(train_data, preserve_index=False)
val_table = pa.Table.from_pandas(val_data, preserve_index=False)
test_table = pa.Table.from_pandas(test_data, preserve_index=False)
# Save each split as ".parquet" file
pq.write_table(train_table, "train.parquet")
pq.write_table(val_table, "val.parquet")
pq.write_table(test_table, "test.parquet")
Upload#
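The parquet files are then uploaded so the Xpdeep platform can read them. A sketch, assuming an upload helper that returns the remote paths keyed by split, which is how the directory variable is used in the dataset code below:
from xpdeep.dataset.upload import upload
# Upload the three splits; the returned mapping exposes one path per split.
directory = upload(
    directory_name="my_uploaded_data",
    train_set_path="train.parquet",
    test_set_path="test.parquet",
    val_set_path="val.parquet",
)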
Preprocess Data#
from sklearn.preprocessing import OneHotEncoder, StandardScaler
import numpy as np
# Fit preprocessors
numerical_features = ["age", "educational-num", "capital-gain", "capital-loss", "hours-per-week"]
categorical_features = ["workclass", "marital-status", "occupation", "relationship", "race", "gender", "native-country"]
target_feature = "income"
numerical_features_standard_scaler = StandardScaler().fit(train_data[numerical_features])
categorical_features_encoders = {}
for category in categorical_features:
    categorical_features_encoders[category] = OneHotEncoder(sparse_output=False).fit(train_data[[category]])
target_feature_encoder = OneHotEncoder(sparse_output=False).fit(train_data[[target_feature]])
# Transform data
x_train = np.concatenate(
    [numerical_features_standard_scaler.transform(train_data[numerical_features])]
    + [categorical_features_encoders[feature].transform(train_data[[feature]]) for feature in categorical_features],
    axis=1,
)
y_train = target_feature_encoder.transform(train_data[[target_feature]])
x_test = np.concatenate(
    [numerical_features_standard_scaler.transform(test_data[numerical_features])]
    + [categorical_features_encoders[feature].transform(test_data[[feature]]) for feature in categorical_features],
    axis=1,
)
y_test = target_feature_encoder.transform(test_data[[target_feature]])
x_val = np.concatenate(
    [numerical_features_standard_scaler.transform(val_data[numerical_features])]
    + [categorical_features_encoders[feature].transform(val_data[[feature]]) for feature in categorical_features],
    axis=1,
)
y_val = target_feature_encoder.transform(val_data[[target_feature]])
# input and output sizes
input_size = x_train.shape[1]
target_size = y_train.shape[1]
from xpdeep.dataset.parquet_dataset import FittedParquetDataset, ParquetDataset
from xpdeep.dataset.schema.feature.feature import NumericalFeature
from xpdeep.dataset.schema.preprocessor import SklearnPreprocessor
from sklearn.preprocessing import StandardScaler
# 1/ Create Analyzed Parquet on Train Dataset
train_dataset = ParquetDataset(
    split_name="train",
    identifier_name="my_local_dataset",
    path=directory["train_set_path"],
)
analyzed_train_dataset = train_dataset.analyze(target_names=["income"])
preprocessor = SklearnPreprocessor(preprocess_function=StandardScaler())
analyzed_train_dataset.analyzed_schema["educational-num"] = NumericalFeature(
    name="educational-num", is_target=False, preprocessor=preprocessor
)
print(analyzed_train_dataset.analyzed_schema)
# 2/ Create Fitted Parquet Datasets
fit_train_dataset = analyzed_train_dataset.fit()
fit_test_dataset = FittedParquetDataset(
    split_name="test",
    identifier_name="my_local_dataset",
    path=directory["test_set_path"],
    fitted_schema=fit_train_dataset.fitted_schema,
)
fit_val_dataset = FittedParquetDataset(
    split_name="validation",
    identifier_name="my_local_dataset",
    path=directory["val_set_path"],
    fitted_schema=fit_train_dataset.fitted_schema,
)
# input and output sizes
input_size = fit_train_dataset.fitted_schema.input_size[1]
target_size = fit_train_dataset.fitted_schema.target_size[1]
3. Model Construction#
Architecture Specification#
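The instantiation code below references sota_model, feature_extractor, and task_learner. A minimal sketch, assuming a small MLP whose layer sizes are illustrative: the SOTA model is one end-to-end network, while Xpdeep splits the same stack into a feature extractor and a task learner:
import torch

# SOTA: a single end-to-end classifier outputting class probabilities.
sota_model = torch.nn.Sequential(
    torch.nn.Linear(input_size, 128),
    torch.nn.ReLU(),
    torch.nn.Linear(128, 64),
    torch.nn.ReLU(),
    torch.nn.Linear(64, target_size),
    torch.nn.Softmax(dim=1),
)

# Xpdeep: the same stack, split into a feature extractor and a task learner.
feature_extractor = torch.nn.Sequential(
    torch.nn.Linear(input_size, 128),
    torch.nn.ReLU(),
    torch.nn.Linear(128, 64),
    torch.nn.ReLU(),
)
task_learner = torch.nn.Sequential(
    torch.nn.Linear(64, target_size),
    torch.nn.Softmax(dim=1),
)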
Model Instantiation#
from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel
# Model specifications and hyperparameters.
explanation_architecture = ModelDecisionGraphParameters(
    graph_depth=3,
    target_homogeneity_pruning_threshold=0.8,
    population_pruning_threshold=0.15,
    prune_step=5,
    target_homogeneity_weight=1.0,
    discrimination_weight=0.1,
    balancing_weight=0.05,
)
# XPDEEP Model Architecture
xpdeep_model = XpdeepModel.from_torch(
    fitted_schema=fit_train_dataset.fitted_schema,
    feature_extraction=feature_extractor,
    task_learner=task_learner,
    decision_graph_parameters=explanation_architecture,
)
4. Training#
Training Specification#
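For the SOTA tab, the manual training loop below needs a loss, an optimizer, and batch/epoch settings, none of which appear in the extracted code. A sketch with illustrative values; BCELoss is chosen here because the sketched model outputs probabilities and the targets are one-hot encoded:
import torch

# Hypothetical SOTA training hyperparameters.
loss_fn = torch.nn.BCELoss()
optimizer = torch.optim.AdamW(sota_model.parameters(), lr=0.001)
batch_size = 128
epochs = 10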
from xpdeep.trainer.callbacks import EarlyStopping, Scheduler, ModelCheckpoint
from functools import partial
from xpdeep.metric import DictMetrics, TorchGlobalMetric, TorchLeafMetric
from torch.optim.lr_scheduler import ReduceLROnPlateau
from xpdeep.trainer.trainer import Trainer
from xpdeep.model.zoo.cross_entropy_loss_from_proba import CrossEntropyLossFromProbabilities
from torchmetrics.classification import MulticlassAccuracy, MulticlassConfusionMatrix
# Metrics to monitor the training.
metrics = DictMetrics(
    global_multi_class_accuracy=TorchGlobalMetric(
        partial(MulticlassAccuracy, num_classes=2, average="micro"), target_as_indexes=True
    ),
    leaf_multi_class_accuracy=TorchLeafMetric(
        partial(MulticlassAccuracy, num_classes=2, average="micro"), target_as_indexes=True
    ),
    leaf_confusion_matrix=TorchLeafMetric(
        partial(MulticlassConfusionMatrix, num_classes=2, normalize="all"), target_as_indexes=True
    ),
)
callbacks = [
    EarlyStopping(monitoring_metric="Total loss", mode="minimize", patience=5),
    Scheduler(pre_scheduler=partial(ReduceLROnPlateau), step_method="epoch", monitoring_metric="Total loss"),
    ModelCheckpoint(monitoring_metric="Total loss", mode="minimize"),
]
# The optimizer is a partial object: PyTorch optimizers require the model
# parameters, which are only bound once the model is built at training time.
optimizer = partial(torch.optim.AdamW, lr=0.001, foreach=False, fused=False)
trainer = Trainer(
    loss=CrossEntropyLossFromProbabilities(reduction="none"),
    optimizer=optimizer,
    callbacks=callbacks,
    start_epoch=0,
    max_epochs=10,
    metrics=metrics,
)
Model Training#
from sklearn.metrics import accuracy_score
import torch
device = "cpu"
def train(X_train, y_train, model, loss_fn, optimizer):
    size = len(X_train)
    model.train()
    total_loss = 0
    for batch in range(size // batch_size):
        # Slice the current mini-batch and move it to the target device
        X_batch = torch.tensor(X_train[batch * batch_size:(batch + 1) * batch_size, :], dtype=torch.float32).to(device)
        y_batch = torch.tensor(y_train[batch * batch_size:(batch + 1) * batch_size, :], dtype=torch.float32).to(device)

        # Compute prediction error
        pred = model(X_batch)
        loss = loss_fn(pred, y_batch)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
    average_loss = total_loss / (size // batch_size)
    return average_loss

def eval_(X_test, y_test, model, loss_fn):
    model.eval()
    with torch.no_grad():
        X_test = torch.tensor(X_test, dtype=torch.float32).to(device)
        y_test = torch.tensor(y_test, dtype=torch.float32).to(device)
        pred = model(X_test)
        test_loss = loss_fn(pred, y_test).item()
        # Round the predicted probabilities to one-hot predictions
        rounded_pred = pred.round()
        accuracy = accuracy_score(y_test.cpu().numpy(), rounded_pred.cpu().numpy())
    return rounded_pred, test_loss, accuracy
for t in range(epochs):
    print(f"\nEpoch {t+1}\n-------------------------------")
    training_loss = train(x_train, y_train, sota_model, loss_fn, optimizer)
    _, val_loss, _ = eval_(x_val, y_val, sota_model, loss_fn)
    print(f"Training Loss: {training_loss}\nValidation Loss: {val_loss}")

_, _, accuracy_on_train = eval_(x_train, y_train, sota_model, loss_fn)
_, _, accuracy_on_validation = eval_(x_val, y_val, sota_model, loss_fn)
_, _, accuracy_on_test = eval_(x_test, y_test, sota_model, loss_fn)
print(
    f"\nAccuracies: "
    f"\nAccuracy on train set : {accuracy_on_train}"
    f"\nAccuracy on validation set : {accuracy_on_validation}"
    f"\nAccuracy on test set : {accuracy_on_test}"
)
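On the Xpdeep side, the whole loop above is replaced by a single call. A sketch, assuming Trainer.train takes the model and the fitted datasets and returns the trained model used by the explainer below:
# Train the explainable model on the platform (signature assumed).
trained_model = trainer.train(
    xpdeep_model,
    train_set=fit_train_dataset,
    validation_set=fit_val_dataset,
)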
5. Explanation Generation#
from xpdeep.explain.explainer import Explainer
from xpdeep.explain.quality_metrics import Infidelity, Sensitivity
from xpdeep.explain.statistic import DictStats, DistributionStat
statistics = DictStats(
    distribution_target=DistributionStat(on="target"),
    distribution_prediction=DistributionStat(on="prediction"),
)
quality_metrics = [Sensitivity(), Infidelity()]
explainer = Explainer(
    description_representativeness=1000,
    quality_metrics=quality_metrics,
    metrics=metrics,
    statistics=statistics,
)
model_explanations = explainer.global_explain(
    trained_model,
    train_set=fit_train_dataset,
    test_set=fit_test_dataset,
    validation_set=fit_val_dataset,
)
print(model_explanations.visualisation_link)