Skip to content

explainer

How to explain a trained model.

Modules:

Name Description
jobs

Jobs utilities.

Classes:

Name Description
Explainer

Explain an XpdeepModel.

Explainer #

Explain an XpdeepModel.

Parameters:

Name Type Description Default

description_representativeness #

int

A parameter governing the explanation quality, the greater, the better, but it will be slower to compute.

required

quality_metrics #

list[QualityMetrics]

A list of quality metrics to compute, like Sensitivity or Infidelity.

required

window_size #

int | None

DTW window parameter, expressed as a proportion (%).

None

metrics #

DictMetrics | None

A list of metrics to compute along with the explanation (F1 score etc.)

None

statistics #

DictStats | None

A list of statistics to compute along with the explanation (Variance on targets etc.)

None

batch_size #

int | None

The batch size to use during explanation. Default to None.

None

seed #

int | None

The seed to use during explanation. Default to None.

None

Methods:

Name Description
local_explain

Create a causal explanation from trained model.

global_explain

Compute model decision on a trained model.

Attributes:

Name Type Description
description_representativeness int
quality_metrics list[QualityMetrics]
window_size int | None
metrics DictMetrics | None
statistics DictStats | None
batch_size int | None
seed int | None

description_representativeness: int #

quality_metrics: list[QualityMetrics] #

window_size: int | None = None #

metrics: DictMetrics | None = None #

statistics: DictStats | None = None #

batch_size: int | None = None #

seed: int | None = None #

local_explain(trained_model: TrainedModelArtifact, train_set: FittedParquetDataset, dataset_filter: Filter, *, explanation_name: str | None = None, explanation_description: str | None = None) -> ExplanationArtifact #

Create a causal explanation from trained model.

Parameters:

Name Type Description Default
trained_model #
TrainedModelArtifact

A model trained via the trainer interface.

required
train_set #
FittedParquetDataset

A dataset representing a train split.

required
dataset_filter #
Filter

A filter used to filter the dataset and get samples to explain.

required
explanation_name #
str | None

The explanation name.

None
explanation_description #
str | None

The explanation description.

None

Returns:

Type Description
ExplanationArtifact

The causal explanation results, containing the result as json.

Source code in src/xpdeep/explain/explainer.py
def local_explain(
    self,
    trained_model: TrainedModelArtifact,
    train_set: FittedParquetDataset,
    dataset_filter: Filter,
    *,
    explanation_name: str | None = None,
    explanation_description: str | None = None,
) -> ExplanationArtifact:
    """Create a causal explanation from trained model.

    Parameters
    ----------
    trained_model : TrainedModelArtifact
        A model trained via the trainer interface.
    train_set : FittedParquetDataset
        A dataset representing a train split.
    dataset_filter : Filter
        A filter used to filter the dataset and get samples to explain.
    explanation_name : str | None, default None
        The explanation name.
    explanation_description : str | None, default None
        The explanation description.

    Returns
    -------
    ExplanationArtifact
        The causal explanation artifact, wrapping the explanation results
        (containing the result as json).
    """
    # Serialize the optional metric/statistic collections only when configured.
    metrics = self.metrics._as_request_body if self.metrics is not None else None
    statistics = self.statistics._as_request_body if self.statistics is not None else None

    # Apply the filter first: the cast below assumes `dataset_filter.id`
    # is populated once the filter has been applied.
    dataset_filter.apply()

    local_explanation_config = BuildConfigRequestBody(
        explain_config=BuildExplainConfigRequestBody(
            description_representativeness=self.description_representativeness,
            quality_metrics=[
                ExplanationCreateConfigQualityMetricBody.from_dict(quality_metric.to_dict())
                for quality_metric in self.quality_metrics
            ],
            metrics=metrics,
            statistics=statistics,
        ),
        batch_config=BuildBatchConfigRequestBody(batch_size=self.batch_size, seed=self.seed),
        read_dataset_config=BuildReadDatasetConfigRequestBody(),
    )

    explanation_create_request_body = ExplanationCreateRequestBody(
        filter_id=cast(str, dataset_filter.id),
        trained_model_id=trained_model.id,
        train_dataset_id=train_set.artifact_id,
        name=explanation_name,
        description=explanation_description,
        config=local_explanation_config,
    )

    # Submit the explanation job against the current project/client context.
    compute_local_explanation_job = cast(
        JobModel,
        create_explanation.sync(
            project_id=Project.CURRENT.get().model.id,
            client=ClientFactory.CURRENT.get()(),
            body=explanation_create_request_body,
        ),
    )

    # Resolve the job into its explanation results model.
    explanation_results_model = self._get_one_explanation(compute_local_explanation_job)

    return ExplanationArtifact(explanation_results_model)

global_explain(trained_model: TrainedModelArtifact, train_set: FittedParquetDataset, test_set: FittedParquetDataset | None = None, validation_set: FittedParquetDataset | None = None) -> ExplanationArtifact #

Compute model decision on a trained model.

Parameters:

Name Type Description Default
trained_model #
TrainedModelArtifact

A model trained via the trainer interface.

required
train_set #
FittedParquetDataset

A dataset representing a train split.

required
test_set #
FittedParquetDataset | None

A dataset representing a test split, used to optionally compute split statistics.

None
validation_set #
FittedParquetDataset | None

A dataset representing a validation split, used to optionally compute split statistics.

None

Returns:

Type Description
ExplanationArtifact

The model decision results, containing the result as json.

Source code in src/xpdeep/explain/explainer.py
def global_explain(
    self,
    trained_model: TrainedModelArtifact,
    train_set: FittedParquetDataset,
    test_set: FittedParquetDataset | None = None,
    validation_set: FittedParquetDataset | None = None,
) -> ExplanationArtifact:
    """Compute model decision on a trained model.

    Parameters
    ----------
    trained_model : TrainedModelArtifact
        A model trained via the trainer interface.
    train_set : FittedParquetDataset
        A dataset representing a train split.
    test_set : FittedParquetDataset | None, default None
        A dataset representing a test split, used to optionally compute split statistics.
    validation_set : FittedParquetDataset | None, default None
        A dataset representing a validation split, used to optionally compute split statistics.

    Returns
    -------
    ExplanationArtifact
        The global explanation artifact, wrapping the model decision results
        (containing the result as json).
    """
    # Optional splits: forward their artifact ids only when provided.
    test_set_id = test_set.artifact_id if test_set is not None else None
    validation_set_id = validation_set.artifact_id if validation_set is not None else None

    # Serialize the optional metric/statistic collections only when configured.
    metrics = self.metrics._as_request_body if self.metrics is not None else None
    statistics = self.statistics._as_request_body if self.statistics is not None else None

    description = f"Global explanation on trained model : {trained_model.name} - with id : {trained_model.id}"

    global_explanation_config = BuildConfigRequestBody(
        explain_config=BuildExplainConfigRequestBody(
            description_representativeness=self.description_representativeness,
            quality_metrics=[
                ExplanationCreateConfigQualityMetricBody.from_dict(quality_metric.to_dict())
                for quality_metric in self.quality_metrics
            ],
            metrics=metrics,
            statistics=statistics,
        ),
        batch_config=BuildBatchConfigRequestBody(batch_size=self.batch_size, seed=self.seed),
        read_dataset_config=BuildReadDatasetConfigRequestBody(),
    )
    # Unlike local_explain, name/description are derived from the trained model.
    body = GlobalExplanationCreateRequestBody(
        trained_model_id=trained_model.id,
        train_dataset_id=train_set.artifact_id,
        config=global_explanation_config,
        name=f"Global_explanation_{trained_model.name}_{trained_model.id}",
        description=description,
        test_dataset_id=test_set_id,
        validation_dataset_id=validation_set_id,
    )

    # Submit the explanation job against the current project/client context.
    compute_global_explanation_job = cast(
        JobModel,
        create_global_explanation.sync(
            project_id=Project.CURRENT.get().model.id, client=ClientFactory.CURRENT.get()(), body=body
        ),
    )

    # Resolve the job into its explanation results model.
    explanation_results_model = self._get_one_explanation(compute_global_explanation_job)
    return ExplanationArtifact(explanation_results_model)