HAR Dataset#
In this section, we detail the PyTorch code for designing an explainable deep model for processing the Human Activity Recognition dataset.
Human Activity Recognition (HAR) is a dataset for classification with time-series inputs.
Please download the dataset as a zip archive here, unzip it, and update the tutorial data path accordingly.
HAR was collected from 30 subjects performing six different activities (Walking, Walking Upstairs, Walking Downstairs, Sitting, Standing, Laying). It consists of inertial sensor data recorded with a smartphone carried by the subjects.
The following image summarizes the dataset.
Please follow this end-to-end tutorial to prepare the dataset, create and train the model, and finally compute explanations.
Prepare the Dataset#
1. Split and Convert your Raw Data#
The first step consists of creating your train, test, and validation splits as a StandardDataset.
As we only have train and test files, we will split the original test set in half to create the test and validation splits.
Each ".txt" file in "Inertial Signals" folder corresponds to a single input channel, the target being in the "y_test" and "y_train" files.
We extract each feature, file by file, as a numpy array, and stack them so that the input feature "human_activity" is a single array of shape batch_size x num_timestamps x num_channels.
Let's transform the train (and validation) and the test data:
import pandas as pd
import numpy as np
from pathlib import Path
# Read train data
features_dict = {}
split_name = "train"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
train_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
train_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
# Read test data
features_dict = {}
split_name = "test"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
test_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
test_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
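As a quick sanity check (a minimal sketch; the exact row counts assume the standard UCI HAR splits of 7352 train and 2947 test windows), you can verify that both arrays follow the batch_size x num_timestamps x num_channels layout:
# Optional sanity check: shapes should be (7352, 128, 9) for train and (2947, 128, 9) for test.
print(train_inputs.shape, train_targets.shape)
print(test_inputs.shape, test_targets.shape)
assert train_inputs.shape[1:] == (128, 9)
assert test_inputs.shape[1:] == (128, 9)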
We map the target values to their labels and build a DataFrame holding the test data, which will be split into test and validation sets below.
activity_mapping = {
1: "Walking",
2: "Walking upstairs",
3: "Walking downstairs",
4: "Sitting",
5: "Standing",
6: "Laying",
}
targets_mapper = np.vectorize(lambda x: activity_mapping[x])
train_targets = targets_mapper(train_targets) # Map targets to their labels.
test_targets = targets_mapper(test_targets)
test_val_data = pd.DataFrame.from_dict({"human_activity": test_inputs.tolist(), "activity": test_targets})
Warning
We need to convert the multidimensional array "human_activity" to a list of lists, as pandas.DataFrame does not handle this format natively.
We can now build the train DataFrame and split the test data into test and validation sets.
import numpy as np
from sklearn.model_selection import train_test_split
train_data = pd.DataFrame.from_dict({"human_activity": train_inputs.tolist(), "activity": train_targets})
test_data, val_data = train_test_split(test_val_data, test_size=0.5, random_state=42)
print(f"Input shape : {np.array(train_data["human_activity"].to_list()).shape}")
👀 Full file preview
"""HAR workflow, classification, time series data."""
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import torch
from torch.nn import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics.classification import MulticlassAccuracy, MulticlassConfusionMatrix, MulticlassF1Score
from xpdeep import init, set_project
from xpdeep.dataset.parquet_dataset import AnalyzedParquetDataset, FittedParquetDataset
from xpdeep.dataset.schema.feature.feature import (
CategoricalFeature,
MultivariateTimeSeries,
)
from xpdeep.dataset.schema.preprocessor import SklearnPreprocessor, TorchPreprocessor
from xpdeep.dataset.schema.schema import AnalyzedSchema
from xpdeep.dataset.upload import upload
from xpdeep.explain.explainer import Explainer
from xpdeep.explain.quality_metrics import Infidelity, Sensitivity
from xpdeep.explain.statistic import DictStats, DistributionStat
from xpdeep.filtering.filter import Filter
from xpdeep.metric import DictMetrics, TorchGlobalMetric, TorchLeafMetric
from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel
from xpdeep.model.zoo.cross_entropy_loss_from_proba import CrossEntropyLossFromProbabilities
from xpdeep.project import Project, get_project
from xpdeep.trainer.callbacks import EarlyStopping, Scheduler
from xpdeep.trainer.trainer import Trainer
def main():
"""Process the dataset, train, and explain the model."""
torch.random.manual_seed(5)
# ##### Prepare the Dataset #######
# 1. Split and Convert your Raw Data
# Read train data
features_dict = {}
split_name = "train"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
train_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
train_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
# Read test data
features_dict = {}
split_name = "test"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
test_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
test_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
activity_mapping = {
1: "Walking",
2: "Walking upstairs",
3: "Walking downstairs",
4: "Sitting",
5: "Standing",
6: "Laying",
}
targets_mapper = np.vectorize(lambda x: activity_mapping[x])
train_targets = targets_mapper(train_targets) # Map targets to their labels.
test_targets = targets_mapper(test_targets)
test_val_data = pd.DataFrame.from_dict({"human_activity": test_inputs.tolist(), "activity": test_targets})
train_data = pd.DataFrame.from_dict({"human_activity": train_inputs.tolist(), "activity": train_targets})
test_data, val_data = train_test_split(test_val_data, test_size=0.5, random_state=42)
print(f"Input shape : {np.array(train_data["human_activity"].to_list()).shape}")
# Convert to pyarrow Table format
train_table = pa.Table.from_pandas(train_data, preserve_index=False)
val_table = pa.Table.from_pandas(val_data, preserve_index=False)
test_table = pa.Table.from_pandas(test_data, preserve_index=False)
# Save each split as ".parquet" file
pq.write_table(train_table, "train.parquet")
pq.write_table(val_table, "val.parquet")
pq.write_table(test_table, "test.parquet")
# 2. Upload your Converted Data
directory = upload(
directory_name="har_uploaded",
train_set_path="train.parquet",
test_set_path="test.parquet",
val_set_path="val.parquet",
)
# 3. Define preprocessors
class ScaleHAR(TorchPreprocessor):
def __init__(self, input_size: tuple[int, ...]):
"""Initialize the scaler."""
super().__init__(input_size=input_size)
self.mean = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).mean(dim=(0, 1))
)
self.std = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).std(dim=(0, 1))
)
def transform(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform."""
return (inputs - self.mean) / self.std
def inverse_transform(self, output: torch.Tensor) -> torch.Tensor:
"""Apply inverse transform."""
return output * self.std + self.mean
# 4. Find a schema
analyzed_schema = AnalyzedSchema(
MultivariateTimeSeries(
asynchronous=True,
channel_names=[
"body_acc_x",
"body_acc_y",
"body_acc_z",
"body_gyro_x",
"body_gyro_y",
"body_gyro_z",
"total_acc_x",
"total_acc_y",
"total_acc_z",
],
name="human_activity",
preprocessor=ScaleHAR(input_size=(128, 9)),
),
CategoricalFeature(
is_target=True,
name="activity",
preprocessor=SklearnPreprocessor(preprocess_function=OneHotEncoder(sparse_output=False)),
),
)
# Create a dataset from the analyzed schema.
analyzed_train_dataset = AnalyzedParquetDataset(
split_name="train",
identifier_name="my_local_dataset",
path=directory["train_set_path"],
analyzed_schema=analyzed_schema,
)
print(analyzed_schema)
# 5. Fit the schema
fit_train_dataset = analyzed_train_dataset.fit()
fit_test_dataset = FittedParquetDataset(
split_name="test",
identifier_name="my_local_dataset",
path=directory["test_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
fit_val_dataset = FittedParquetDataset(
split_name="val",
identifier_name="my_local_dataset",
path=directory["val_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
# ##### Prepare the Model #######
# 1. Create the required torch models
input_size = fit_train_dataset.fitted_schema.input_size[1:]
target_size = fit_train_dataset.fitted_schema.target_size[1]
print(f"input_size: {input_size} - target_size: {target_size}")
class FeatureExtractor(Sequential):
def __init__(self):
layers = [
torch.nn.Conv1d(9, 32, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Flatten(),
]
super().__init__(*layers)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
x = inputs.transpose(1, 2)
return super().forward(x)
class TaskLearner(Sequential):
def __init__(self):
layers = [
torch.nn.LazyLinear(out_features=6),
torch.nn.Softmax(dim=-1)
]
super().__init__(*layers)
# 2. Explainable Model Specifications
explanation_architecture = ModelDecisionGraphParameters(
graph_depth=3,
discrimination_weight=0.1,
target_homogeneity_weight=2.0,
prune_step=11,
target_homogeneity_pruning_threshold=0.7,
population_pruning_threshold=0.05,
balancing_weight=1.0,
)
# 3. Create the Explainable Model
xpdeep_model = XpdeepModel.from_torch(
fitted_schema=fit_train_dataset.fitted_schema,
feature_extraction=FeatureExtractor(),
task_learner=TaskLearner(),
decision_graph_parameters=explanation_architecture,
)
# ##### Train #######
target_size = fit_train_dataset.fitted_schema.target_size[1]
# Metrics to monitor the training.
metrics = DictMetrics(
global_multi_class_accuracy=TorchGlobalMetric(
partial(MulticlassAccuracy, num_classes=target_size, average="micro"), target_as_indexes=True),
leaf_multi_class_accuracy=TorchLeafMetric(partial(MulticlassAccuracy, num_classes=target_size, average="micro"),
target_as_indexes=True),
global_multi_class_F1_score=TorchGlobalMetric(
partial(MulticlassF1Score, num_classes=target_size, average="macro"), target_as_indexes=True),
leaf_multi_class_F1_score=TorchLeafMetric(partial(MulticlassF1Score, num_classes=target_size, average="macro"),
target_as_indexes=True),
global_confusion_matrix=TorchGlobalMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
leaf_confusion_matrix=TorchLeafMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
)
callbacks = [
EarlyStopping(monitoring_metric="Total loss", mode="minimize", patience=5),
Scheduler(
pre_scheduler=partial(ReduceLROnPlateau, patience=3, mode="max"),
step_method="epoch",
monitoring_metric="global_multi_class_accuracy",
),
]
# Optimizer is a partial object as pytorch needs to give the model as optimizer parameter.
optimizer = partial(torch.optim.AdamW, lr=0.001, foreach=False, fused=False)
trainer = Trainer(
loss=CrossEntropyLossFromProbabilities(reduction="none"),
optimizer=optimizer,
callbacks=callbacks,
start_epoch=0,
max_epochs=20,
metrics=metrics,
)
trained_model = trainer.train(
model=xpdeep_model,
train_set=fit_train_dataset,
validation_set=fit_val_dataset,
batch_size=128,
)
# ##### Explain #######
# 1. Build the Explainer
statistics = DictStats(
distribution_target=DistributionStat(on="target"), distribution_prediction=DistributionStat(on="prediction")
)
quality_metrics = [Sensitivity(), Infidelity()]
explainer = Explainer(
description_representativeness=1000, quality_metrics=quality_metrics, metrics=metrics, statistics=statistics
)
# 2. Model Functioning Explanations
model_explanations = explainer.global_explain(
trained_model,
train_set=fit_train_dataset,
test_set=fit_test_dataset,
validation_set=fit_val_dataset,
)
print(model_explanations.visualisation_link)
# 3. Inference and their Causal Explanations
my_filter = Filter("testing_filter", fit_test_dataset, row_indexes=list(range(100)))
causal_explanations = explainer.local_explain(trained_model, fit_test_dataset, my_filter)
print(causal_explanations.visualisation_link)
if __name__ == "__main__":
init(api_key="api_key", api_url="api_url")
set_project(Project.create_or_get(name="Har Tutorial"))
try:
main()
finally:
get_project().delete()
As stated in the documentation, Xpdeep requires ".parquet" files to create the dataset. The original data is stored as ".txt" files; therefore, each split must be converted to a ".parquet" file.
Tip
To get your ".parquet" files, you can easily convert each split from pandas.DataFrame
to pyarrow.Table
first.
Like pandas.DataFrame
, pyarrow.Table
does not support multidimensional arrays, please ensure to convert
arrays to lists first.
Warning
Here we set preserve_index to False in order to remove the DataFrame "index" column from the resulting PyArrow Table.
import pyarrow as pa
import pyarrow.parquet as pq
# Convert to pyarrow Table format
train_table = pa.Table.from_pandas(train_data, preserve_index=False)
val_table = pa.Table.from_pandas(val_data, preserve_index=False)
test_table = pa.Table.from_pandas(test_data, preserve_index=False)
# Save each split as ".parquet" file
pq.write_table(train_table, "train.parquet")
pq.write_table(val_table, "val.parquet")
pq.write_table(test_table, "test.parquet")
👀 Full file preview
"""HAR workflow, classification, time series data."""
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import torch
from torch.nn import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics.classification import MulticlassAccuracy, MulticlassConfusionMatrix, MulticlassF1Score
from xpdeep import init, set_project
from xpdeep.dataset.parquet_dataset import AnalyzedParquetDataset, FittedParquetDataset
from xpdeep.dataset.schema.feature.feature import (
CategoricalFeature,
MultivariateTimeSeries,
)
from xpdeep.dataset.schema.preprocessor import SklearnPreprocessor, TorchPreprocessor
from xpdeep.dataset.schema.schema import AnalyzedSchema
from xpdeep.dataset.upload import upload
from xpdeep.explain.explainer import Explainer
from xpdeep.explain.quality_metrics import Infidelity, Sensitivity
from xpdeep.explain.statistic import DictStats, DistributionStat
from xpdeep.filtering.filter import Filter
from xpdeep.metric import DictMetrics, TorchGlobalMetric, TorchLeafMetric
from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel
from xpdeep.model.zoo.cross_entropy_loss_from_proba import CrossEntropyLossFromProbabilities
from xpdeep.project import Project, get_project
from xpdeep.trainer.callbacks import EarlyStopping, Scheduler
from xpdeep.trainer.trainer import Trainer
def main():
"""Process the dataset, train, and explain the model."""
torch.random.manual_seed(5)
# ##### Prepare the Dataset #######
# 1. Split and Convert your Raw Data
# Read train data
features_dict = {}
split_name = "train"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
train_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
train_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
# Read test data
features_dict = {}
split_name = "test"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
test_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
test_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
activity_mapping = {
1: "Walking",
2: "Walking upstairs",
3: "Walking downstairs",
4: "Sitting",
5: "Standing",
6: "Laying",
}
targets_mapper = np.vectorize(lambda x: activity_mapping[x])
train_targets = targets_mapper(train_targets) # Map targets to their labels.
test_targets = targets_mapper(test_targets)
test_val_data = pd.DataFrame.from_dict({"human_activity": test_inputs.tolist(), "activity": test_targets})
train_data = pd.DataFrame.from_dict({"human_activity": train_inputs.tolist(), "activity": train_targets})
test_data, val_data = train_test_split(test_val_data, test_size=0.5, random_state=42)
print(f"Input shape : {np.array(train_data["human_activity"].to_list()).shape}")
# Convert to pyarrow Table format
train_table = pa.Table.from_pandas(train_data, preserve_index=False)
val_table = pa.Table.from_pandas(val_data, preserve_index=False)
test_table = pa.Table.from_pandas(test_data, preserve_index=False)
# Save each split as ".parquet" file
pq.write_table(train_table, "train.parquet")
pq.write_table(val_table, "val.parquet")
pq.write_table(test_table, "test.parquet")
# 2. Upload your Converted Data
directory = upload(
directory_name="har_uploaded",
train_set_path="train.parquet",
test_set_path="test.parquet",
val_set_path="val.parquet",
)
# 3. Define preprocessors
class ScaleHAR(TorchPreprocessor):
def __init__(self, input_size: tuple[int, ...]):
"""Initialize the scaler."""
super().__init__(input_size=input_size)
self.mean = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).mean(dim=(0, 1))
)
self.std = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).std(dim=(0, 1))
)
def transform(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform."""
return (inputs - self.mean) / self.std
def inverse_transform(self, output: torch.Tensor) -> torch.Tensor:
"""Apply inverse transform."""
return output * self.std + self.mean
# 4. Find a schema
analyzed_schema = AnalyzedSchema(
MultivariateTimeSeries(
asynchronous=True,
channel_names=[
"body_acc_x",
"body_acc_y",
"body_acc_z",
"body_gyro_x",
"body_gyro_y",
"body_gyro_z",
"total_acc_x",
"total_acc_y",
"total_acc_z",
],
name="human_activity",
preprocessor=ScaleHAR(input_size=(128, 9)),
),
CategoricalFeature(
is_target=True,
name="activity",
preprocessor=SklearnPreprocessor(preprocess_function=OneHotEncoder(sparse_output=False)),
),
)
# Create a dataset from the analyzed schema.
analyzed_train_dataset = AnalyzedParquetDataset(
split_name="train",
identifier_name="my_local_dataset",
path=directory["train_set_path"],
analyzed_schema=analyzed_schema,
)
print(analyzed_schema)
# 5. Fit the schema
fit_train_dataset = analyzed_train_dataset.fit()
fit_test_dataset = FittedParquetDataset(
split_name="test",
identifier_name="my_local_dataset",
path=directory["test_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
fit_val_dataset = FittedParquetDataset(
split_name="val",
identifier_name="my_local_dataset",
path=directory["val_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
# ##### Prepare the Model #######
# 1. Create the required torch models
input_size = fit_train_dataset.fitted_schema.input_size[1:]
target_size = fit_train_dataset.fitted_schema.target_size[1]
print(f"input_size: {input_size} - target_size: {target_size}")
class FeatureExtractor(Sequential):
def __init__(self):
layers = [
torch.nn.Conv1d(9, 32, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Flatten(),
]
super().__init__(*layers)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
x = inputs.transpose(1, 2)
return super().forward(x)
class TaskLearner(Sequential):
def __init__(self):
layers = [
torch.nn.LazyLinear(out_features=6),
torch.nn.Softmax(dim=-1)
]
super().__init__(*layers)
# 2. Explainable Model Specifications
explanation_architecture = ModelDecisionGraphParameters(
graph_depth=3,
discrimination_weight=0.1,
target_homogeneity_weight=2.0,
prune_step=11,
target_homogeneity_pruning_threshold=0.7,
population_pruning_threshold=0.05,
balancing_weight=1.0,
)
# 3. Create the Explainable Model
xpdeep_model = XpdeepModel.from_torch(
fitted_schema=fit_train_dataset.fitted_schema,
feature_extraction=FeatureExtractor(),
task_learner=TaskLearner(),
decision_graph_parameters=explanation_architecture,
)
# ##### Train #######
target_size = fit_train_dataset.fitted_schema.target_size[1]
# Metrics to monitor the training.
metrics = DictMetrics(
global_multi_class_accuracy=TorchGlobalMetric(
partial(MulticlassAccuracy, num_classes=target_size, average="micro"), target_as_indexes=True),
leaf_multi_class_accuracy=TorchLeafMetric(partial(MulticlassAccuracy, num_classes=target_size, average="micro"),
target_as_indexes=True),
global_multi_class_F1_score=TorchGlobalMetric(
partial(MulticlassF1Score, num_classes=target_size, average="macro"), target_as_indexes=True),
leaf_multi_class_F1_score=TorchLeafMetric(partial(MulticlassF1Score, num_classes=target_size, average="macro"),
target_as_indexes=True),
global_confusion_matrix=TorchGlobalMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
leaf_confusion_matrix=TorchLeafMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
)
callbacks = [
EarlyStopping(monitoring_metric="Total loss", mode="minimize", patience=5),
Scheduler(
pre_scheduler=partial(ReduceLROnPlateau, patience=3, mode="max"),
step_method="epoch",
monitoring_metric="global_multi_class_accuracy",
),
]
# Optimizer is a partial object as pytorch needs to give the model as optimizer parameter.
optimizer = partial(torch.optim.AdamW, lr=0.001, foreach=False, fused=False)
trainer = Trainer(
loss=CrossEntropyLossFromProbabilities(reduction="none"),
optimizer=optimizer,
callbacks=callbacks,
start_epoch=0,
max_epochs=20,
metrics=metrics,
)
trained_model = trainer.train(
model=xpdeep_model,
train_set=fit_train_dataset,
validation_set=fit_val_dataset,
batch_size=128,
)
# ##### Explain #######
# 1. Build the Explainer
statistics = DictStats(
distribution_target=DistributionStat(on="target"), distribution_prediction=DistributionStat(on="prediction")
)
quality_metrics = [Sensitivity(), Infidelity()]
explainer = Explainer(
description_representativeness=1000, quality_metrics=quality_metrics, metrics=metrics, statistics=statistics
)
# 2. Model Functioning Explanations
model_explanations = explainer.global_explain(
trained_model,
train_set=fit_train_dataset,
test_set=fit_test_dataset,
validation_set=fit_val_dataset,
)
print(model_explanations.visualisation_link)
# 3. Inference and their Causal Explanations
my_filter = Filter("testing_filter", fit_test_dataset, row_indexes=list(range(100)))
causal_explanations = explainer.local_explain(trained_model, fit_test_dataset, my_filter)
print(causal_explanations.visualisation_link)
if __name__ == "__main__":
init(api_key="api_key", api_url="api_url")
set_project(Project.create_or_get(name="Har Tutorial"))
try:
main()
finally:
get_project().delete()
2. Upload your Converted Data#
Warning
Don't forget to set up a Project and initialize the API with your credentials!
from xpdeep import init, set_project
from xpdeep.project import Project
init(api_key="api_key", api_url="api_url")
set_project(Project.create_or_get(name="Har Tutorial"))
With your Project set up, you can upload the converted parquet files to the Xpdeep server.
from xpdeep.dataset.upload import upload
directory = upload(
directory_name="har_uploaded",
train_set_path="train.parquet",
test_set_path="test.parquet",
val_set_path="val.parquet",
)
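The returned directory object is then used to reference the uploaded files. As the rest of the tutorial shows, it can be indexed with the "train_set_path", "test_set_path" and "val_set_path" keys, for instance (a minimal sketch):
# Inspect the upload result; these keys are the ones used later in the tutorial.
print(directory["train_set_path"])
print(directory["test_set_path"])
print(directory["val_set_path"])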
👀 Full file preview
"""HAR workflow, classification, time series data."""
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import torch
from torch.nn import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics.classification import MulticlassAccuracy, MulticlassConfusionMatrix, MulticlassF1Score
from xpdeep import init, set_project
from xpdeep.dataset.parquet_dataset import AnalyzedParquetDataset, FittedParquetDataset
from xpdeep.dataset.schema.feature.feature import (
CategoricalFeature,
MultivariateTimeSeries,
)
from xpdeep.dataset.schema.preprocessor import SklearnPreprocessor, TorchPreprocessor
from xpdeep.dataset.schema.schema import AnalyzedSchema
from xpdeep.dataset.upload import upload
from xpdeep.explain.explainer import Explainer
from xpdeep.explain.quality_metrics import Infidelity, Sensitivity
from xpdeep.explain.statistic import DictStats, DistributionStat
from xpdeep.filtering.filter import Filter
from xpdeep.metric import DictMetrics, TorchGlobalMetric, TorchLeafMetric
from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel
from xpdeep.model.zoo.cross_entropy_loss_from_proba import CrossEntropyLossFromProbabilities
from xpdeep.project import Project, get_project
from xpdeep.trainer.callbacks import EarlyStopping, Scheduler
from xpdeep.trainer.trainer import Trainer
def main():
"""Process the dataset, train, and explain the model."""
torch.random.manual_seed(5)
# ##### Prepare the Dataset #######
# 1. Split and Convert your Raw Data
# Read train data
features_dict = {}
split_name = "train"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
train_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
train_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
# Read test data
features_dict = {}
split_name = "test"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
test_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
test_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
activity_mapping = {
1: "Walking",
2: "Walking upstairs",
3: "Walking downstairs",
4: "Sitting",
5: "Standing",
6: "Laying",
}
targets_mapper = np.vectorize(lambda x: activity_mapping[x])
train_targets = targets_mapper(train_targets) # Map targets to their labels.
test_targets = targets_mapper(test_targets)
test_val_data = pd.DataFrame.from_dict({"human_activity": test_inputs.tolist(), "activity": test_targets})
train_data = pd.DataFrame.from_dict({"human_activity": train_inputs.tolist(), "activity": train_targets})
test_data, val_data = train_test_split(test_val_data, test_size=0.5, random_state=42)
print(f"Input shape : {np.array(train_data["human_activity"].to_list()).shape}")
# Convert to pyarrow Table format
train_table = pa.Table.from_pandas(train_data, preserve_index=False)
val_table = pa.Table.from_pandas(val_data, preserve_index=False)
test_table = pa.Table.from_pandas(test_data, preserve_index=False)
# Save each split as ".parquet" file
pq.write_table(train_table, "train.parquet")
pq.write_table(val_table, "val.parquet")
pq.write_table(test_table, "test.parquet")
# 2. Upload your Converted Data
directory = upload(
directory_name="har_uploaded",
train_set_path="train.parquet",
test_set_path="test.parquet",
val_set_path="val.parquet",
)
# 3. Define preprocessors
class ScaleHAR(TorchPreprocessor):
def __init__(self, input_size: tuple[int, ...]):
"""Initialize the scaler."""
super().__init__(input_size=input_size)
self.mean = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).mean(dim=(0, 1))
)
self.std = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).std(dim=(0, 1))
)
def transform(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform."""
return (inputs - self.mean) / self.std
def inverse_transform(self, output: torch.Tensor) -> torch.Tensor:
"""Apply inverse transform."""
return output * self.std + self.mean
# 4. Find a schema
analyzed_schema = AnalyzedSchema(
MultivariateTimeSeries(
asynchronous=True,
channel_names=[
"body_acc_x",
"body_acc_y",
"body_acc_z",
"body_gyro_x",
"body_gyro_y",
"body_gyro_z",
"total_acc_x",
"total_acc_y",
"total_acc_z",
],
name="human_activity",
preprocessor=ScaleHAR(input_size=(128, 9)),
),
CategoricalFeature(
is_target=True,
name="activity",
preprocessor=SklearnPreprocessor(preprocess_function=OneHotEncoder(sparse_output=False)),
),
)
# Create a dataset from the analyzed schema.
analyzed_train_dataset = AnalyzedParquetDataset(
split_name="train",
identifier_name="my_local_dataset",
path=directory["train_set_path"],
analyzed_schema=analyzed_schema,
)
print(analyzed_schema)
# 5. Fit the schema
fit_train_dataset = analyzed_train_dataset.fit()
fit_test_dataset = FittedParquetDataset(
split_name="test",
identifier_name="my_local_dataset",
path=directory["test_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
fit_val_dataset = FittedParquetDataset(
split_name="val",
identifier_name="my_local_dataset",
path=directory["val_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
# ##### Prepare the Model #######
# 1. Create the required torch models
input_size = fit_train_dataset.fitted_schema.input_size[1:]
target_size = fit_train_dataset.fitted_schema.target_size[1]
print(f"input_size: {input_size} - target_size: {target_size}")
class FeatureExtractor(Sequential):
def __init__(self):
layers = [
torch.nn.Conv1d(9, 32, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Flatten(),
]
super().__init__(*layers)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
x = inputs.transpose(1, 2)
return super().forward(x)
class TaskLearner(Sequential):
def __init__(self):
layers = [
torch.nn.LazyLinear(out_features=6),
torch.nn.Softmax(dim=-1)
]
super().__init__(*layers)
# 2. Explainable Model Specifications
explanation_architecture = ModelDecisionGraphParameters(
graph_depth=3,
discrimination_weight=0.1,
target_homogeneity_weight=2.0,
prune_step=11,
target_homogeneity_pruning_threshold=0.7,
population_pruning_threshold=0.05,
balancing_weight=1.0,
)
# 3. Create the Explainable Model
xpdeep_model = XpdeepModel.from_torch(
fitted_schema=fit_train_dataset.fitted_schema,
feature_extraction=FeatureExtractor(),
task_learner=TaskLearner(),
decision_graph_parameters=explanation_architecture,
)
# ##### Train #######
target_size = fit_train_dataset.fitted_schema.target_size[1]
# Metrics to monitor the training.
metrics = DictMetrics(
global_multi_class_accuracy=TorchGlobalMetric(
partial(MulticlassAccuracy, num_classes=target_size, average="micro"), target_as_indexes=True),
leaf_multi_class_accuracy=TorchLeafMetric(partial(MulticlassAccuracy, num_classes=target_size, average="micro"),
target_as_indexes=True),
global_multi_class_F1_score=TorchGlobalMetric(
partial(MulticlassF1Score, num_classes=target_size, average="macro"), target_as_indexes=True),
leaf_multi_class_F1_score=TorchLeafMetric(partial(MulticlassF1Score, num_classes=target_size, average="macro"),
target_as_indexes=True),
global_confusion_matrix=TorchGlobalMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
leaf_confusion_matrix=TorchLeafMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
)
callbacks = [
EarlyStopping(monitoring_metric="Total loss", mode="minimize", patience=5),
Scheduler(
pre_scheduler=partial(ReduceLROnPlateau, patience=3, mode="max"),
step_method="epoch",
monitoring_metric="global_multi_class_accuracy",
),
]
# Optimizer is a partial object as pytorch needs to give the model as optimizer parameter.
optimizer = partial(torch.optim.AdamW, lr=0.001, foreach=False, fused=False)
trainer = Trainer(
loss=CrossEntropyLossFromProbabilities(reduction="none"),
optimizer=optimizer,
callbacks=callbacks,
start_epoch=0,
max_epochs=20,
metrics=metrics,
)
trained_model = trainer.train(
model=xpdeep_model,
train_set=fit_train_dataset,
validation_set=fit_val_dataset,
batch_size=128,
)
# ##### Explain #######
# 1. Build the Explainer
statistics = DictStats(
distribution_target=DistributionStat(on="target"), distribution_prediction=DistributionStat(on="prediction")
)
quality_metrics = [Sensitivity(), Infidelity()]
explainer = Explainer(
description_representativeness=1000, quality_metrics=quality_metrics, metrics=metrics, statistics=statistics
)
# 2. Model Functioning Explanations
model_explanations = explainer.global_explain(
trained_model,
train_set=fit_train_dataset,
test_set=fit_test_dataset,
validation_set=fit_val_dataset,
)
print(model_explanations.visualisation_link)
# 3. Inference and their Causal Explanations
my_filter = Filter("testing_filter", fit_test_dataset, row_indexes=list(range(100)))
causal_explanations = explainer.local_explain(trained_model, fit_test_dataset, my_filter)
print(causal_explanations.visualisation_link)
if __name__ == "__main__":
init(api_key="api_key", api_url="api_url")
set_project(Project.create_or_get(name="Har Tutorial"))
try:
main()
finally:
get_project().delete()
3. Find a schema#
For HAR, we cannot use the AutoAnalyzer, as it does not support time series features yet. We therefore need to create an
AnalyzedSchema
from scratch. We add a time series feature (9 channels, asynchronous) and a categorical feature for the target (the 6 activities to classify).
We use the custom preprocessor class ScaleHAR, which inherits from TorchPreprocessor and lets us use torch.Tensor operations to scale the time series.
import torch
from xpdeep.dataset.schema.preprocessor import TorchPreprocessor
class ScaleHAR(TorchPreprocessor):
def __init__(self, input_size: tuple[int, ...]):
"""Initialize the scaler."""
super().__init__(input_size=input_size)
self.mean = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).mean(dim=(0,1))
)
self.std = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).std(dim=(0,1))
)
def transform(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform."""
return (inputs - self.mean) / self.std
def inverse_transform(self, output: torch.Tensor) -> torch.Tensor:
"""Apply inverse transform."""
return output * self.std + self.mean
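To check that the scaler behaves as expected, here is a minimal sketch of a round-trip test (it assumes train_data and train_table from the conversion step above are still in scope):
# Round-trip check for ScaleHAR (assumes train_table from the previous step is available).
scaler = ScaleHAR(input_size=(128, 9))
sample = torch.tensor(train_data["human_activity"].to_list()[:4])  # shape (4, 128, 9)
scaled = scaler.transform(sample)  # standardized per channel with train statistics
restored = scaler.inverse_transform(scaled)
assert torch.allclose(restored, sample, atol=1e-4)  # the round trip recovers the input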
Let's now define the AnalyzedSchema.
from xpdeep.dataset.schema.schema import AnalyzedSchema
from sklearn.preprocessing import OneHotEncoder
from xpdeep.dataset.schema.feature.feature import (
CategoricalFeature,
MultivariateTimeSeries,
)
from xpdeep.dataset.schema.preprocessor import SklearnPreprocessor
analyzed_schema = AnalyzedSchema(
MultivariateTimeSeries(
asynchronous=True,
channel_names=[
"body_acc_x",
"body_acc_y",
"body_acc_z",
"body_gyro_x",
"body_gyro_y",
"body_gyro_z",
"total_acc_x",
"total_acc_y",
"total_acc_z",
],
name="human_activity",
preprocessor=ScaleHAR(input_size=(128, 9)),
),
CategoricalFeature(
is_target=True,
name="activity",
preprocessor=SklearnPreprocessor(preprocess_function=OneHotEncoder(sparse_output=False)),
),
)
print(analyzed_schema)
+----------------------------------------------------------------+
| Schema Contents |
+-----------------------------------+----------------+-----------+
| Type | Name | Is Target |
+-----------------------------------+----------------+-----------+
| IndexMetadata | index_xp_deep | |
| MultivariateTimeSeries | human_activity | ❌ |
| CategoricalFeature | activity | ✅ |
+-----------------------------------+----------------+-----------+
Tip
Categories in the CategoricalFeature are automatically inferred by the preprocessor object during the fitting step.
The categorical feature's categories attribute is None prior to the fitting step.
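To illustrate the underlying scikit-learn behaviour (a minimal sketch, independent of Xpdeep's fitting step), the OneHotEncoder only exposes its categories once it has been fitted:
import numpy as np
from sklearn.preprocessing import OneHotEncoder

# Before fitting, the encoder has no categories_ attribute.
encoder = OneHotEncoder(sparse_output=False)
print(getattr(encoder, "categories_", None))  # None
encoder.fit(np.array(["Walking", "Sitting", "Standing"]).reshape(-1, 1))
print(encoder.categories_)  # [array(['Sitting', 'Standing', 'Walking'], ...)]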
Finally, we can create the AnalyzedParquetDataset. The test and validation datasets will be created later.
from xpdeep.dataset.parquet_dataset import AnalyzedParquetDataset
# Create a train dataset from the analyzed schema.
analyzed_train_dataset = AnalyzedParquetDataset(
split_name="train",
identifier_name="my_local_dataset",
path=directory["train_set_path"],
analyzed_schema=analyzed_schema
)
👀 Full file preview
"""HAR workflow, classification, time series data."""
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import torch
from torch.nn import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics.classification import MulticlassAccuracy, MulticlassConfusionMatrix, MulticlassF1Score
from xpdeep import init, set_project
from xpdeep.dataset.parquet_dataset import AnalyzedParquetDataset, FittedParquetDataset
from xpdeep.dataset.schema.feature.feature import (
CategoricalFeature,
MultivariateTimeSeries,
)
from xpdeep.dataset.schema.preprocessor import SklearnPreprocessor, TorchPreprocessor
from xpdeep.dataset.schema.schema import AnalyzedSchema
from xpdeep.dataset.upload import upload
from xpdeep.explain.explainer import Explainer
from xpdeep.explain.quality_metrics import Infidelity, Sensitivity
from xpdeep.explain.statistic import DictStats, DistributionStat
from xpdeep.filtering.filter import Filter
from xpdeep.metric import DictMetrics, TorchGlobalMetric, TorchLeafMetric
from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel
from xpdeep.model.zoo.cross_entropy_loss_from_proba import CrossEntropyLossFromProbabilities
from xpdeep.project import Project, get_project
from xpdeep.trainer.callbacks import EarlyStopping, Scheduler
from xpdeep.trainer.trainer import Trainer
def main():
"""Process the dataset, train, and explain the model."""
torch.random.manual_seed(5)
# ##### Prepare the Dataset #######
# 1. Split and Convert your Raw Data
# Read train data
features_dict = {}
split_name = "train"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
train_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
train_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
# Read test data
features_dict = {}
split_name = "test"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
test_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
test_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
activity_mapping = {
1: "Walking",
2: "Walking upstairs",
3: "Walking downstairs",
4: "Sitting",
5: "Standing",
6: "Laying",
}
targets_mapper = np.vectorize(lambda x: activity_mapping[x])
train_targets = targets_mapper(train_targets) # Map targets to their labels.
test_targets = targets_mapper(test_targets)
test_val_data = pd.DataFrame.from_dict({"human_activity": test_inputs.tolist(), "activity": test_targets})
train_data = pd.DataFrame.from_dict({"human_activity": train_inputs.tolist(), "activity": train_targets})
test_data, val_data = train_test_split(test_val_data, test_size=0.5, random_state=42)
print(f"Input shape : {np.array(train_data["human_activity"].to_list()).shape}")
# Convert to pyarrow Table format
train_table = pa.Table.from_pandas(train_data, preserve_index=False)
val_table = pa.Table.from_pandas(val_data, preserve_index=False)
test_table = pa.Table.from_pandas(test_data, preserve_index=False)
# Save each split as ".parquet" file
pq.write_table(train_table, "train.parquet")
pq.write_table(val_table, "val.parquet")
pq.write_table(test_table, "test.parquet")
# 2. Upload your Converted Data
directory = upload(
directory_name="har_uploaded",
train_set_path="train.parquet",
test_set_path="test.parquet",
val_set_path="val.parquet",
)
# 3. Define preprocessors
class ScaleHAR(TorchPreprocessor):
def __init__(self, input_size: tuple[int, ...]):
"""Initialize the scaler."""
super().__init__(input_size=input_size)
self.mean = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).mean(dim=(0, 1))
)
self.std = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).std(dim=(0, 1))
)
def transform(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform."""
return (inputs - self.mean) / self.std
def inverse_transform(self, output: torch.Tensor) -> torch.Tensor:
"""Apply inverse transform."""
return output * self.std + self.mean
# 4. Find a schema
analyzed_schema = AnalyzedSchema(
MultivariateTimeSeries(
asynchronous=True,
channel_names=[
"body_acc_x",
"body_acc_y",
"body_acc_z",
"body_gyro_x",
"body_gyro_y",
"body_gyro_z",
"total_acc_x",
"total_acc_y",
"total_acc_z",
],
name="human_activity",
preprocessor=ScaleHAR(input_size=(128, 9)),
),
CategoricalFeature(
is_target=True,
name="activity",
preprocessor=SklearnPreprocessor(preprocess_function=OneHotEncoder(sparse_output=False)),
),
)
# Create a dataset from the analyzed schema.
analyzed_train_dataset = AnalyzedParquetDataset(
split_name="train",
identifier_name="my_local_dataset",
path=directory["train_set_path"],
analyzed_schema=analyzed_schema,
)
print(analyzed_schema)
# 5. Fit the schema
fit_train_dataset = analyzed_train_dataset.fit()
fit_test_dataset = FittedParquetDataset(
split_name="test",
identifier_name="my_local_dataset",
path=directory["test_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
fit_val_dataset = FittedParquetDataset(
split_name="val",
identifier_name="my_local_dataset",
path=directory["val_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
# ##### Prepare the Model #######
# 1. Create the required torch models
input_size = fit_train_dataset.fitted_schema.input_size[1:]
target_size = fit_train_dataset.fitted_schema.target_size[1]
print(f"input_size: {input_size} - target_size: {target_size}")
class FeatureExtractor(Sequential):
def __init__(self):
layers = [
torch.nn.Conv1d(9, 32, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Flatten(),
]
super().__init__(*layers)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
x = inputs.transpose(1, 2)
return super().forward(x)
class TaskLearner(Sequential):
def __init__(self):
layers = [
torch.nn.LazyLinear(out_features=6),
torch.nn.Softmax(dim=-1)
]
super().__init__(*layers)
# 2. Explainable Model Specifications
explanation_architecture = ModelDecisionGraphParameters(
graph_depth=3,
discrimination_weight=0.1,
target_homogeneity_weight=2.0,
prune_step=11,
target_homogeneity_pruning_threshold=0.7,
population_pruning_threshold=0.05,
balancing_weight=1.0,
)
# 3. Create the Explainable Model
xpdeep_model = XpdeepModel.from_torch(
fitted_schema=fit_train_dataset.fitted_schema,
feature_extraction=FeatureExtractor(),
task_learner=TaskLearner(),
decision_graph_parameters=explanation_architecture,
)
# ##### Train #######
target_size = fit_train_dataset.fitted_schema.target_size[1]
# Metrics to monitor the training.
metrics = DictMetrics(
global_multi_class_accuracy=TorchGlobalMetric(
partial(MulticlassAccuracy, num_classes=target_size, average="micro"), target_as_indexes=True),
leaf_multi_class_accuracy=TorchLeafMetric(partial(MulticlassAccuracy, num_classes=target_size, average="micro"),
target_as_indexes=True),
global_multi_class_F1_score=TorchGlobalMetric(
partial(MulticlassF1Score, num_classes=target_size, average="macro"), target_as_indexes=True),
leaf_multi_class_F1_score=TorchLeafMetric(partial(MulticlassF1Score, num_classes=target_size, average="macro"),
target_as_indexes=True),
global_confusion_matrix=TorchGlobalMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
leaf_confusion_matrix=TorchLeafMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
)
callbacks = [
EarlyStopping(monitoring_metric="Total loss", mode="minimize", patience=5),
Scheduler(
pre_scheduler=partial(ReduceLROnPlateau, patience=3, mode="max"),
step_method="epoch",
monitoring_metric="global_multi_class_accuracy",
),
]
# Optimizer is a partial object as pytorch needs to give the model as optimizer parameter.
optimizer = partial(torch.optim.AdamW, lr=0.001, foreach=False, fused=False)
trainer = Trainer(
loss=CrossEntropyLossFromProbabilities(reduction="none"),
optimizer=optimizer,
callbacks=callbacks,
start_epoch=0,
max_epochs=20,
metrics=metrics,
)
trained_model = trainer.train(
model=xpdeep_model,
train_set=fit_train_dataset,
validation_set=fit_val_dataset,
batch_size=128,
)
# ##### Explain #######
# 1. Build the Explainer
statistics = DictStats(
distribution_target=DistributionStat(on="target"), distribution_prediction=DistributionStat(on="prediction")
)
quality_metrics = [Sensitivity(), Infidelity()]
explainer = Explainer(
description_representativeness=1000, quality_metrics=quality_metrics, metrics=metrics, statistics=statistics
)
# 2. Model Functioning Explanations
model_explanations = explainer.global_explain(
trained_model,
train_set=fit_train_dataset,
test_set=fit_test_dataset,
validation_set=fit_val_dataset,
)
print(model_explanations.visualisation_link)
# 3. Inference and their Causal Explanations
my_filter = Filter("testing_filter", fit_test_dataset, row_indexes=list(range(100)))
causal_explanations = explainer.local_explain(trained_model, fit_test_dataset, my_filter)
print(causal_explanations.visualisation_link)
if __name__ == "__main__":
init(api_key="api_key", api_url="api_url")
set_project(Project.create_or_get(name="Har Tutorial"))
try:
main()
finally:
get_project().delete()
Tip
Here we did not build a ParquetDataset first, as we create the dataset straight from the existing analyzed schema.
The ParquetDataset interface serves only as an intermediate class, used to obtain an AnalyzedParquetDataset via the AutoAnalyzer and its analyze method.
5. Fit the schema#
With your AnalyzedSchema ready, you can now fit it so that each feature preprocessor is fitted on the train set.
Note
Only the SklearnPreprocessor will be fitted when calling analyzed_train_dataset.fit(), as ScaleHAR does not require a fitting step.
We use the same FittedSchema to create a FittedParquetDataset for the validation and test sets.
from xpdeep.dataset.parquet_dataset import FittedParquetDataset
fit_test_dataset = FittedParquetDataset(
split_name="test",
identifier_name="my_local_dataset",
path=directory["test_set_path"],
fitted_schema=fit_train_dataset.fitted_schema
)
fit_val_dataset = FittedParquetDataset(
split_name="val",
identifier_name="my_local_dataset",
path=directory["val_set_path"],
fitted_schema=fit_train_dataset.fitted_schema
)
👀 Full file preview
"""HAR workflow, classification, time series data."""
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import torch
from torch.nn import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics.classification import MulticlassAccuracy, MulticlassConfusionMatrix, MulticlassF1Score
from xpdeep import init, set_project
from xpdeep.dataset.parquet_dataset import AnalyzedParquetDataset, FittedParquetDataset
from xpdeep.dataset.schema.feature.feature import (
CategoricalFeature,
MultivariateTimeSeries,
)
from xpdeep.dataset.schema.preprocessor import SklearnPreprocessor, TorchPreprocessor
from xpdeep.dataset.schema.schema import AnalyzedSchema
from xpdeep.dataset.upload import upload
from xpdeep.explain.explainer import Explainer
from xpdeep.explain.quality_metrics import Infidelity, Sensitivity
from xpdeep.explain.statistic import DictStats, DistributionStat
from xpdeep.filtering.filter import Filter
from xpdeep.metric import DictMetrics, TorchGlobalMetric, TorchLeafMetric
from xpdeep.model.model_builder import ModelDecisionGraphParameters
from xpdeep.model.xpdeep_model import XpdeepModel
from xpdeep.model.zoo.cross_entropy_loss_from_proba import CrossEntropyLossFromProbabilities
from xpdeep.project import Project, get_project
from xpdeep.trainer.callbacks import EarlyStopping, Scheduler
from xpdeep.trainer.trainer import Trainer
def main():
"""Process the dataset, train, and explain the model."""
torch.random.manual_seed(5)
# ##### Prepare the Dataset #######
# 1. Split and Convert your Raw Data
# Read train data
features_dict = {}
split_name = "train"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
train_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
train_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
# Read test data
features_dict = {}
split_name = "test"
for feature_filepath in sorted(Path(f"{split_name}/Inertial Signals/").rglob("*.txt")):
feature_name = feature_filepath.stem
features_dict[feature_name] = np.squeeze(
pd.read_csv(feature_filepath, sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
test_inputs = np.transpose(np.stack(list(features_dict.values()), axis=1), (0, 2, 1))
test_targets = np.squeeze(
pd.read_csv(f"{split_name}/y_{split_name}.txt", sep=r"\s+", header=None).to_numpy(dtype=np.float32)
)
activity_mapping = {
1: "Walking",
2: "Walking upstairs",
3: "Walking downstairs",
4: "Sitting",
5: "Standing",
6: "Laying",
}
targets_mapper = np.vectorize(lambda x: activity_mapping[x])
train_targets = targets_mapper(train_targets) # Map targets to their labels.
test_targets = targets_mapper(test_targets)
test_val_data = pd.DataFrame.from_dict({"human_activity": test_inputs.tolist(), "activity": test_targets})
train_data = pd.DataFrame.from_dict({"human_activity": train_inputs.tolist(), "activity": train_targets})
test_data, val_data = train_test_split(test_val_data, test_size=0.5, random_state=42)
print(f"Input shape : {np.array(train_data["human_activity"].to_list()).shape}")
# Convert to pyarrow Table format
train_table = pa.Table.from_pandas(train_data, preserve_index=False)
val_table = pa.Table.from_pandas(val_data, preserve_index=False)
test_table = pa.Table.from_pandas(test_data, preserve_index=False)
# Save each split as ".parquet" file
pq.write_table(train_table, "train.parquet")
pq.write_table(val_table, "val.parquet")
pq.write_table(test_table, "test.parquet")
# 2. Upload your Converted Data
directory = upload(
directory_name="har_uploaded",
train_set_path="train.parquet",
test_set_path="test.parquet",
val_set_path="val.parquet",
)
# 3. Define preprocessors
class ScaleHAR(TorchPreprocessor):
def __init__(self, input_size: tuple[int, ...]):
"""Initialize the scaler."""
super().__init__(input_size=input_size)
self.mean = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).mean(dim=(0, 1))
)
self.std = torch.nn.Parameter(
torch.tensor(train_table.column("human_activity").to_pylist()).std(dim=(0, 1))
)
def transform(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform."""
return (inputs - self.mean) / self.std
def inverse_transform(self, output: torch.Tensor) -> torch.Tensor:
"""Apply inverse transform."""
return output * self.std + self.mean
# 4. Find a schema
analyzed_schema = AnalyzedSchema(
MultivariateTimeSeries(
asynchronous=True,
channel_names=[
"body_acc_x",
"body_acc_y",
"body_acc_z",
"body_gyro_x",
"body_gyro_y",
"body_gyro_z",
"total_acc_x",
"total_acc_y",
"total_acc_z",
],
name="human_activity",
preprocessor=ScaleHAR(input_size=(128, 9)),
),
CategoricalFeature(
is_target=True,
name="activity",
preprocessor=SklearnPreprocessor(preprocess_function=OneHotEncoder(sparse_output=False)),
),
)
# Create a dataset from the analyzed schema.
analyzed_train_dataset = AnalyzedParquetDataset(
split_name="train",
identifier_name="my_local_dataset",
path=directory["train_set_path"],
analyzed_schema=analyzed_schema,
)
print(analyzed_schema)
# 5. Fit the schema
fit_train_dataset = analyzed_train_dataset.fit()
fit_test_dataset = FittedParquetDataset(
split_name="test",
identifier_name="my_local_dataset",
path=directory["test_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
fit_val_dataset = FittedParquetDataset(
split_name="val",
identifier_name="my_local_dataset",
path=directory["val_set_path"],
fitted_schema=fit_train_dataset.fitted_schema,
)
# ##### Prepare the Model #######
# 1. Create the required torch models
input_size = fit_train_dataset.fitted_schema.input_size[1:]
target_size = fit_train_dataset.fitted_schema.target_size[1]
print(f"input_size: {input_size} - target_size: {target_size}")
class FeatureExtractor(Sequential):
def __init__(self):
layers = [
torch.nn.Conv1d(9, 32, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Flatten(),
]
super().__init__(*layers)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
x = inputs.transpose(1, 2)
return super().forward(x)
class TaskLearner(Sequential):
def __init__(self):
layers = [
torch.nn.LazyLinear(out_features=6),
torch.nn.Softmax(dim=-1)
]
super().__init__(*layers)
# 2. Explainable Model Specifications
explanation_architecture = ModelDecisionGraphParameters(
graph_depth=3,
discrimination_weight=0.1,
target_homogeneity_weight=2.0,
prune_step=11,
target_homogeneity_pruning_threshold=0.7,
population_pruning_threshold=0.05,
balancing_weight=1.0,
)
# 3. Create the Explainable Model
xpdeep_model = XpdeepModel.from_torch(
fitted_schema=fit_train_dataset.fitted_schema,
feature_extraction=FeatureExtractor(),
task_learner=TaskLearner(),
decision_graph_parameters=explanation_architecture,
)
# ##### Train #######
target_size = fit_train_dataset.fitted_schema.target_size[1]
# Metrics to monitor the training.
metrics = DictMetrics(
global_multi_class_accuracy=TorchGlobalMetric(
partial(MulticlassAccuracy, num_classes=target_size, average="micro"), target_as_indexes=True),
leaf_multi_class_accuracy=TorchLeafMetric(partial(MulticlassAccuracy, num_classes=target_size, average="micro"),
target_as_indexes=True),
global_multi_class_F1_score=TorchGlobalMetric(
partial(MulticlassF1Score, num_classes=target_size, average="macro"), target_as_indexes=True),
leaf_multi_class_F1_score=TorchLeafMetric(partial(MulticlassF1Score, num_classes=target_size, average="macro"),
target_as_indexes=True),
global_confusion_matrix=TorchGlobalMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
leaf_confusion_matrix=TorchLeafMetric(
partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
)
callbacks = [
EarlyStopping(monitoring_metric="Total loss", mode="minimize", patience=5),
Scheduler(
pre_scheduler=partial(ReduceLROnPlateau, patience=3, mode="max"),
step_method="epoch",
monitoring_metric="global_multi_class_accuracy",
),
]
# Optimizer is a partial object as pytorch needs to give the model as optimizer parameter.
optimizer = partial(torch.optim.AdamW, lr=0.001, foreach=False, fused=False)
trainer = Trainer(
loss=CrossEntropyLossFromProbabilities(reduction="none"),
optimizer=optimizer,
callbacks=callbacks,
start_epoch=0,
max_epochs=20,
metrics=metrics,
)
trained_model = trainer.train(
model=xpdeep_model,
train_set=fit_train_dataset,
validation_set=fit_val_dataset,
batch_size=128,
)
# ##### Explain #######
# 1. Build the Explainer
statistics = DictStats(
distribution_target=DistributionStat(on="target"), distribution_prediction=DistributionStat(on="prediction")
)
quality_metrics = [Sensitivity(), Infidelity()]
explainer = Explainer(
description_representativeness=1000, quality_metrics=quality_metrics, metrics=metrics, statistics=statistics
)
# 2. Model Functioning Explanations
model_explanations = explainer.global_explain(
trained_model,
train_set=fit_train_dataset,
test_set=fit_test_dataset,
validation_set=fit_val_dataset,
)
print(model_explanations.visualisation_link)
# 3. Inference and their Causal Explanations
my_filter = Filter("testing_filter", fit_test_dataset, row_indexes=list(range(100)))
causal_explanations = explainer.local_explain(trained_model, fit_test_dataset, my_filter)
print(causal_explanations.visualisation_link)
if __name__ == "__main__":
init(api_key="api_key", api_url="api_url")
set_project(Project.create_or_get(name="Har Tutorial"))
try:
main()
finally:
get_project().delete()
And that's all for the dataset preparation. We now have three FittedParquetDataset objects, each with its FittedSchema, ready to be used.
Prepare the Model#
We now need to create an explainable model, XpdeepModel.
1. Create the required torch models#
We have a classification task with time-series input data. We will use a basic Convolutional Neural Network for this task.
Tip
Model input and output sizes (including the batch dimension) can be easily retrieved from the fitted schema.
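For instance (these lines also appear in the full script), the sizes can be read directly from the fitted schema:
input_size = fit_train_dataset.fitted_schema.input_size[1:]  # (128, 9): timestamps x channels
target_size = fit_train_dataset.fitted_schema.target_size[1]  # 6 activity classes
print(f"input_size: {input_size} - target_size: {target_size}")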
Therefore, we chose:
- The FeatureExtractionModel (the FeatureExtractor below), which embeds the input data into a 128-dimension feature space.
- The TaskLearnerModel (the TaskLearner below), which returns an output of size 6, one per activity class.
import torch
from torch.nn import Sequential
class FeatureExtractor(Sequential):
def __init__(self):
layers = [
torch.nn.Conv1d(9, 32, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(32, 64, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Flatten(),
]
super().__init__(*layers)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
x = inputs.transpose(1, 2)
return super().forward(x)
class TaskLearner(Sequential):
def __init__(self):
layers = [
torch.nn.LazyLinear(out_features=6),
torch.nn.Softmax(dim=-1)
]
super().__init__(*layers)
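As a quick sanity check (a minimal sketch reusing the two classes just defined, not part of the tutorial script), you can push a dummy batch through both modules to confirm the shapes:
dummy = torch.randn(4, 128, 9)             # (batch, timestamps, channels), as delivered by the schema
features = FeatureExtractor()(dummy)       # transposed internally to (batch, channels, timestamps), then flattened
probabilities = TaskLearner()(features)    # LazyLinear infers its input size on this first call -> shape (4, 6)
print(features.shape, probabilities.shape)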
2. Explainable Model Specifications#
Here comes the crucial part: we need to define the model specifications with ModelDecisionGraphParameters to get the best explanations (Model Decision Graph and Inference Graph).
from xpdeep.model.model_builder import ModelDecisionGraphParameters
explanation_architecture = ModelDecisionGraphParameters(
graph_depth=3,
discrimination_weight=0.1,
target_homogeneity_weight=2.0,
prune_step=11,
target_homogeneity_pruning_threshold=0.7,
population_pruning_threshold=0.05,
balancing_weight=1.0,
)
For further details, see docs
Note
All parameters have a default value; you can start with the default values, then iterate and update the configuration to find suitable explanations.
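For example, a reasonable starting point (assuming the defaults mentioned in the note above) is to build the configuration with no arguments and only override what you want to explore:
# Start from the defaults, then iterate on individual parameters.
default_architecture = ModelDecisionGraphParameters()
# A deeper decision graph as a first, hypothetical adjustment:
deeper_architecture = ModelDecisionGraphParameters(graph_depth=4)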
3. Create the Explainable Model#
Given the model architecture and configuration, we can finally instantiate the explainable model XpdeepModel.
from xpdeep.model.xpdeep_model import XpdeepModel
xpdeep_model = XpdeepModel.from_torch(
fitted_schema=fit_train_dataset.fitted_schema,
feature_extraction=FeatureExtractor(),
task_learner=TaskLearner(),
decision_graph_parameters=explanation_architecture,
)
Train#
The training step is straightforward: we need to specify the Trainer parameters.
from functools import partial
import torch
from xpdeep.metric import DictMetrics, TorchGlobalMetric, TorchLeafMetric
from xpdeep.trainer.callbacks import EarlyStopping, Scheduler
from xpdeep.trainer.trainer import Trainer
from xpdeep.model.zoo.cross_entropy_loss_from_proba import CrossEntropyLossFromProbabilities
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics.classification import MulticlassAccuracy, MulticlassF1Score, MulticlassConfusionMatrix
target_size = fit_train_dataset.fitted_schema.target_size[1]
# Metrics to monitor the training.
metrics = DictMetrics(
global_multi_class_accuracy=TorchGlobalMetric(partial(MulticlassAccuracy, num_classes=target_size, average="micro"), target_as_indexes=True),
leaf_multi_class_accuracy=TorchLeafMetric(partial(MulticlassAccuracy, num_classes=target_size, average="micro"), target_as_indexes=True),
global_multi_class_F1_score=TorchGlobalMetric(partial(MulticlassF1Score, num_classes=target_size, average="macro"), target_as_indexes=True),
leaf_multi_class_F1_score=TorchLeafMetric(partial(MulticlassF1Score, num_classes=target_size, average="macro"), target_as_indexes=True),
global_confusion_matrix=TorchGlobalMetric(partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
leaf_confusion_matrix=TorchLeafMetric(partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True),
)
callbacks = [
EarlyStopping(monitoring_metric="Total loss", mode="minimize", patience=5),
Scheduler(
pre_scheduler=partial(ReduceLROnPlateau, patience=3, mode="max"),
step_method="epoch",
monitoring_metric="global_multi_class_accuracy",
),
]
# The optimizer is given as a partial object: the trainer later binds the model parameters to it.
optimizer = partial(torch.optim.AdamW, lr=0.001, foreach=False, fused=False)
trainer = Trainer(
loss=CrossEntropyLossFromProbabilities(reduction="none"),
optimizer=optimizer,
callbacks=callbacks,
start_epoch=0,
max_epochs=20,
metrics=metrics,
)
Warning
Here, we set foreach and fused to False, as they may currently lead to unstable behaviour in the training process.
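For reference, the partial acts as an optimizer factory. Here is a minimal sketch (an assumption about the trainer internals, not the actual xpdeep code) of how such a factory gets bound to model parameters:
from functools import partial
import torch

optimizer_factory = partial(torch.optim.AdamW, lr=0.001, foreach=False, fused=False)
toy_model = torch.nn.Linear(4, 2)  # hypothetical stand-in for the real model
optimizer = optimizer_factory(toy_model.parameters())  # same as AdamW(toy_model.parameters(), lr=0.001, ...)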
We can now train the model:
trained_model = trainer.train(
model=xpdeep_model,
train_set=fit_train_dataset,
validation_set=fit_val_dataset,
batch_size=128,
)
The training logs are displayed in the console:
Epoch 1/20 - Loss: 1.790: 2%|▏ | 1/46 [00:04<03:22, 4.49s/it]
Epoch 1/20 - Loss: 1.576: 4%|▍ | 2/46 [00:06<02:04, 2.83s/it]
Once the model is trained, it can be used to compute explanations.
Explain#
Similarly to the Trainer, explanations are computed with an Explainer interface.
1. Build the Explainer#
We provide the Explainer with quality metrics to get insights on the explanation quality. In addition, we compute histograms along with the explanations to get a detailed distribution of targets and predictions. Finally, we set description_representativeness to 1000.
from xpdeep.explain.explainer import Explainer
from xpdeep.explain.quality_metrics import Infidelity, Sensitivity
from xpdeep.explain.statistic import DictStats, DistributionStat
statistics = DictStats(
distribution_target=DistributionStat(on="target"),
distribution_prediction=DistributionStat(on="prediction")
)
quality_metrics = [Sensitivity(), Infidelity()]
explainer = Explainer(
description_representativeness=1000, quality_metrics=quality_metrics,
metrics=metrics, statistics=statistics
)
Tip
Here we reuse the metrics from the training stage for convenience, but they can be adapted to your needs!
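For example, a lighter metric set focused on per-leaf behaviour could be passed to the Explainer instead. This is only a sketch reusing the classes already imported above; the keyword names are arbitrary:
# Hypothetical lighter metric set, used only at explanation time (key names are arbitrary).
explain_metrics = DictMetrics(
    leaf_multi_class_accuracy=TorchLeafMetric(
        partial(MulticlassAccuracy, num_classes=target_size, average="micro"), target_as_indexes=True
    ),
    leaf_confusion_matrix=TorchLeafMetric(
        partial(MulticlassConfusionMatrix, num_classes=target_size, normalize="all"), target_as_indexes=True
    ),
)
explainer = Explainer(
    description_representativeness=1000, quality_metrics=quality_metrics, metrics=explain_metrics, statistics=statistics
)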
2. Model Functioning Explanations#
Model Functioning Explanations are computed with the global_explain method.
model_explanations = explainer.global_explain(
trained_model,
train_set=fit_train_dataset,
test_set=fit_test_dataset,
validation_set=fit_val_dataset,
)
print(model_explanations.visualisation_link)
We can visualize explanations with XpViz, using the link in model_explanations.visualisation_link, provided you have already requested the correct credentials.
3. Inference and their Causal Explanations#
We need a subset of samples on which to compute Causal Explanations. Here, we filter the test set to keep only the first 100 samples.
from xpdeep.filtering.filter import Filter
my_filter = Filter("testing_filter", fit_test_dataset, row_indexes=list(range(100)))
Future Release
Time series filters will be implemented to filter based on channel values.
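In the meantime, row_indexes accepts any list of row positions, so a custom subset can be built from the prepared split. As a sketch, assuming the test_data DataFrame from the preparation step is still in scope (the filter name and the class choice are arbitrary):
# Select the rows of the test split labelled "Walking" (hypothetical selection).
walking_rows = [i for i, label in enumerate(test_data["activity"].to_list()) if label == "Walking"]
walking_filter = Filter("walking_filter", fit_test_dataset, row_indexes=walking_rows[:100])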
Explanations can then be computed using the local_explain method from the Explainer.
causal_explanations = explainer.local_explain(trained_model, fit_test_dataset, my_filter)
print(causal_explanations.visualisation_link)
We can again visualize the causal explanations using the visualisation_link.