chanelcolgate committed on
Commit ce530c1 · 1 Parent(s): cfea7d1

modified: average_precision.py

Files changed (1)
  1. average_precision.py +48 -21
average_precision.py CHANGED
@@ -86,19 +86,24 @@ Args:
 Returns:
     accuracy: description of the first score,
     another_score: description of the second score,
+    average_precision: float
+        Average precision score.
+See Also
+    roc_auc_score: Compute the area under the ROC curve.
+    precision_recall_curve: Compute precision-recall pairs for different
+        probability thresholds.
 Examples:
     Examples should be written in doctest format, and should illustrate how
     to use the function.

-    >>> my_new_module = evaluate.load("my_new_module")
-    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-    >>> print(results)
-    {'accuracy': 1.0}
+    >>> import numpy as np
+    >>> from sklearn.metrics import average_precision_score
+    >>> y_true = np.array([0, 0, 1, 1])
+    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
+    >>> average_precision_score(y_true, y_scores)
+    0.8333333333333333
 """

-# TODO: Define external resources urls if needed
-BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
-

 @evaluate.utils.file_utils.add_start_docstrings(
     _DESCRIPTION, _KWARGS_DESCRIPTION
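The new doctest's value can be sanity-checked against the summation definition AP = sum_n (R_n - R_{n-1}) * P_n, using the precision_recall_curve function that the added See Also block points to. A minimal sketch, not part of the commit:

import numpy as np
from sklearn.metrics import average_precision_score, precision_recall_curve

y_true = np.array([0, 0, 1, 1])
y_scores = np.array([0.1, 0.4, 0.35, 0.8])

# precision_recall_curve returns its operating points with recall in
# decreasing order, hence the leading minus sign on the summation
precision, recall, _ = precision_recall_curve(y_true, y_scores)
ap_manual = -np.sum(np.diff(recall) * precision[:-1])

assert np.isclose(ap_manual, average_precision_score(y_true, y_scores))
print(ap_manual)  # 0.8333333333333333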
@@ -115,17 +120,29 @@ class AveragePrecision(evaluate.Metric):
             citation=_CITATION,
             inputs_description=_KWARGS_DESCRIPTION,
             # This defines the format of each prediction and reference
-            features=datasets.Features(
-                {
-                    "predictions": datasets.Value("int64"),
-                    "references": datasets.Value("int64"),
-                }
-            ),
+            features=[
+                datasets.Features(
+                    {
+                        "references": datasets.Value("int64"),
+                        "prediction_scores": datasets.Value("float"),
+                    }
+                ),
+                datasets.Features(
+                    {
+                        "references": datasets.Sequence(
+                            datasets.Value("int64")
+                        ),
+                        "prediction_scores": datasets.Sequence(
+                            datasets.Value("float")
+                        ),
+                    }
+                ),
+            ],
             # Homepage of the module for documentation
-            homepage="http://module.homepage",
+            homepage="https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html",
             # Additional links to the codebase or references
-            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
-            reference_urls=["http://path.to.reference.url/new_module"],
+            codebase_urls=["https://github.com/scikit-learn/scikit-learn"],
+            reference_urls=["https://scikit-learn.org/stable/index.html"],
         )

     def _download_and_prepare(self, dl_manager):
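The features argument is now a list of two datasets.Features, so the metric accepts either binary inputs (one int64 reference and one float score per example) or multilabel inputs (a sequence of each per example). A hedged sketch of the two shapes, calling scikit-learn directly; the toy arrays are illustrative, not from the commit:

import numpy as np
from sklearn.metrics import average_precision_score

# Binary case: one scalar reference and one score per example
references = [0, 0, 1, 1]
prediction_scores = [0.1, 0.4, 0.35, 0.8]
print(average_precision_score(references, prediction_scores))

# Multilabel case: one sequence of references and scores per example;
# average="macro" (the new _compute default) averages the per-class scores
references = np.array([[1, 0], [0, 1], [1, 1]])
prediction_scores = np.array([[0.8, 0.2], [0.3, 0.7], [0.6, 0.9]])
print(average_precision_score(references, prediction_scores, average="macro"))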
@@ -133,12 +150,22 @@ class AveragePrecision(evaluate.Metric):
         # TODO: Download external resources if needed
         pass

-    def _compute(self, predictions, references):
+    def _compute(
+        self,
+        references,
+        prediction_scores,
+        average="macro",
+        pos_label=1,
+        sample_weight=None,
+    ):
         """Returns the scores"""
         # TODO: Compute the different scores of the module
-        accuracy = sum(i == j for i, j in zip(predictions, references)) / len(
-            predictions
-        )
         return {
-            "accuracy": accuracy,
+            "average_precision_score": average_precision_score(
+                y_true=references,
+                y_score=prediction_scores,
+                average=average,
+                pos_label=pos_label,
+                sample_weight=sample_weight,
+            )
         }
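With the rewritten _compute in place, the module should be callable through evaluate.load. A usage sketch, assuming the module is published under the author's namespace (the repo id below is a guess, not confirmed by the commit):

import evaluate

# hypothetical repo id, inferred from the author and file name
average_precision = evaluate.load("chanelcolgate/average_precision")
results = average_precision.compute(
    references=[0, 0, 1, 1],
    prediction_scores=[0.1, 0.4, 0.35, 0.8],
)
print(results)  # {'average_precision_score': 0.8333333333333333}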
 