Rename functions and quantities of interest #26

Merged: 3 commits, Feb 27, 2024
6 changes: 3 additions & 3 deletions doc/api.rst
@@ -30,8 +30,8 @@ This is the full API documentation of the `sharp` package.
qoi.DiffQoI
qoi.FlipQoI
qoi.LikelihoodQoI
- qoi.RankingQoI
- qoi.RankingScoreQoI
+ qoi.RankQoI
+ qoi.RankScoreQoI
qoi.TopKQoI


@@ -68,4 +68,4 @@ This is the full API documentation of the `sharp` package.
utils.check_inputs
utils.check_measure
utils.check_qoi
- utils.scores_to_rank
+ utils.scores_to_ordering
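Taken together, these documentation entries track the renames made across the package in this PR. A minimal sketch of the corresponding import changes for downstream code, using only the module paths shown in the diffs below (old names are commented out for contrast):

# Before this PR:
# from sharp.qoi import RankingQoI, RankingScoreQoI
# from sharp.utils import scores_to_rank

# After this PR:
from sharp.qoi import RankQoI, RankScoreQoI
from sharp.utils import scores_to_ordering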
8 changes: 4 additions & 4 deletions examples/plot_basic_usage.py
@@ -37,7 +37,7 @@ def score_function(X):
# Next, we will set up ``ShaRP``:

xai = ShaRP(
qoi="ranking",
qoi="rank",
target_function=score_function,
measure="shapley",
sample_size=None,
@@ -50,8 +50,8 @@ def score_function(X):
######################################################################################
# Let's take a look at some shapley values used for ranking explanations:

print("Global contribution of a single feature:", xai.feature(0, X))
print("Global feature contributions:", xai.all(X).mean(axis=0))
print("Aggregate contribution of a single feature:", xai.feature(0, X))
print("Aggregate feature contributions:", xai.all(X).mean(axis=0))

individual_scores = xai.individual(9, X)
print("Feature contributions to a single observation: ", individual_scores)
@@ -76,6 +76,6 @@ def score_function(X):

# Waterfall explaining rank for sample 2
axes[1] = xai.plot.waterfall(individual_scores)
axes[1].suptitle("Ranking explanation for Sample 9")
axes[1].suptitle("Rank explanation for Sample 9")

plt.show()
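For context, here is a condensed sketch of the updated example usage with the renamed QoI string. Only the lines visible in this diff are certain; the import path for ShaRP, the placeholder data, and the score function below are assumptions, and the real plot_basic_usage.py may include additional setup in the sections elided from this hunk.

import numpy as np
from sharp import ShaRP  # import path assumed; not shown in this diff

# Placeholder data and scorer; the real example defines its own X and score_function.
X = np.random.default_rng(0).uniform(size=(20, 3))

def score_function(X):
    return X.mean(axis=1)

xai = ShaRP(
    qoi="rank",  # renamed from "ranking" in this PR
    target_function=score_function,
    measure="shapley",
    sample_size=None,
)

print("Aggregate contribution of a single feature:", xai.feature(0, X))
print("Aggregate feature contributions:", xai.all(X).mean(axis=0))

individual_scores = xai.individual(9, X)
print("Feature contributions to a single observation:", individual_scores)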
13 changes: 0 additions & 13 deletions sharp/base.py
@@ -1,18 +1,5 @@
"""
Base object used to set up explainability objects.

- Topics that must be covered:
- - Single-observation explanation
- - Global-input explanation
- * Set qii
- * Unary qii
- * Marginal qii
- * Shapley
- * Banzhaff
-
- TODO: Check params functions (data/object types and such)
- TODO: Parallelization/Vectorization
- TODO: Ensure inputs are converted to numpy arrays
"""

import numpy as np
8 changes: 4 additions & 4 deletions sharp/qoi/__init__.py
@@ -6,8 +6,8 @@
DiffQoI,
FlipQoI,
LikelihoodQoI,
- RankingQoI,
- RankingScoreQoI,
+ RankQoI,
+ RankScoreQoI,
TopKQoI,
QOI_OBJECTS,
)
@@ -16,8 +16,8 @@
"DiffQoI",
"FlipQoI",
"LikelihoodQoI",
"RankingQoI",
"RankingScoreQoI",
"RankQoI",
"RankScoreQoI",
"TopKQoI",
"QOI_OBJECTS",
]
16 changes: 8 additions & 8 deletions sharp/qoi/_qoi.py
@@ -1,4 +1,4 @@
- from .base import BaseQoI, BaseRankingQoI
+ from .base import BaseQoI, BaseRankQoI


class DiffQoI(BaseQoI):
@@ -80,9 +80,9 @@ def _calculate(self, rows1, rows2):
return self.estimate(rows1) - self.estimate(rows2) # .mean()


- class RankingQoI(BaseRankingQoI):
+ class RankQoI(BaseRankQoI):
"""
- Ranking specific QoI. Uses rank as the quantity being measured. The influence score
+ Rank specific QoI. Uses rank as the quantity being measured. The influence score
is based on the comparison between the rank of a sample and synthetic data (based on
the original sample). ``target_function`` should output scores.

@@ -98,7 +98,7 @@ def _calculate(self, rows1, rows2):
return (self.estimate(rows2) - self.estimate(rows1)).mean()


- class RankingScoreQoI(BaseRankingQoI):
+ class RankScoreQoI(BaseRankQoI):
"""
A general, ranking-oriented QoI, similar to ``DiffQoI``. ``target_function`` must
output scores.
@@ -115,9 +115,9 @@ def _calculate(self, rows1, rows2):
return (self.estimate(rows1) - self.estimate(rows2)).mean()


- class TopKQoI(BaseRankingQoI):
+ class TopKQoI(BaseRankQoI):
"""
- Ranking-specific QoI. Estimates the likelihood of reaching the top-K as the
+ Rank-specific QoI. Estimates the likelihood of reaching the top-K as the
quantity of interest.

Parameters
@@ -142,7 +142,7 @@ def _calculate(self, rows1, rows2):
"diff": DiffQoI,
"flip": FlipQoI,
"likelihood": LikelihoodQoI,
"ranking": RankingQoI,
"ranking_score": RankingScoreQoI,
"rank": RankQoI,
"rank_score": RankScoreQoI,
"top_k": TopKQoI,
}
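Because QoIs can also be selected by string (as the registry above and the tests below show), the renames change the accepted keys as well. A small sketch assuming only the mapping shown in this hunk:

from sharp.qoi import QOI_OBJECTS, RankQoI, RankScoreQoI

# "ranking" and "ranking_score" are replaced by "rank" and "rank_score"; "top_k" is unchanged.
assert QOI_OBJECTS["rank"] is RankQoI
assert QOI_OBJECTS["rank_score"] is RankScoreQoI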
2 changes: 1 addition & 1 deletion sharp/qoi/base.py
@@ -75,7 +75,7 @@ def _calculate(self, rows1, rows2):
pass


- class BaseRankingQoI(BaseQoI, metaclass=ABCMeta):
+ class BaseRankQoI(BaseQoI, metaclass=ABCMeta):
"""
Base class to implement Quantities of Interest (QoI) for ranking tasks. It should not
be used directly. Any QoI must define at least 2 functions:
2 changes: 1 addition & 1 deletion sharp/tests/test_basic_usage.py
@@ -11,7 +11,7 @@
N_SAMPLES = 50
rng = check_random_state(RNG_SEED)

rank_qois_str = ["ranking", "ranking_score", "top_k"]
rank_qois_str = ["rank", "rank_score", "top_k"]
rank_qois_obj = [QOI_OBJECTS[qoi] for qoi in rank_qois_str]

clf_qois_str = ["diff", "flip", "likelihood"]
4 changes: 2 additions & 2 deletions sharp/utils/__init__.py
@@ -3,12 +3,12 @@
"""

from ._checks import check_feature_names, check_inputs, check_measure, check_qoi
- from ._rank_utils import scores_to_rank
+ from ._rank_utils import scores_to_ordering

__all__ = [
"check_feature_names",
"check_inputs",
"check_measure",
"check_qoi",
"scores_to_rank",
"scores_to_ordering",
]
2 changes: 1 addition & 1 deletion sharp/utils/_rank_utils.py
@@ -1,7 +1,7 @@
import numpy as np


- def scores_to_rank(y, direction=-1):
+ def scores_to_ordering(y, direction=-1):
"""
Converts an array with scores to a ranking.

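The body of scores_to_ordering is not part of this diff. As a purely illustrative sketch of the documented behavior (converting an array of scores to a ranking, with direction=-1 taken here to mean that higher scores come first), an argsort-based version could look like the following; only the signature and the docstring line above come from the diff, everything else is an assumption:

import numpy as np

def scores_to_ordering(y, direction=-1):
    # Hypothetical implementation: assign rank positions (1 = first) to each score in y.
    # direction=-1 puts the highest score first; direction=1 puts the lowest score first.
    order = np.argsort(direction * np.asarray(y))
    ordering = np.empty(len(order), dtype=int)
    ordering[order] = np.arange(1, len(order) + 1)
    return ordering

# Example: scores_to_ordering([0.2, 0.9, 0.5]) -> array([3, 1, 2])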
2 changes: 0 additions & 2 deletions sharp/visualization/_waterfall.py
@@ -3,8 +3,6 @@
import numpy as np
from sharp.utils._utils import _optional_import

- # from sharp.utils import scores_to_rank


blue_rgb = np.array([0, 0.54337757, 0.98337906])
light_blue_rgb = np.array([127.0, 196, 252]) / 255