@@ -53,7 +53,15 @@ def random_y_true_raw_prediction(
 ):
     """Randomly generate y_true and raw_prediction in valid range."""
     rng = np.random.RandomState(seed)
-    if loss.n_classes <= 2:
+    if loss.is_multiclass:
+        raw_prediction = np.empty((n_samples, loss.n_classes))
+        raw_prediction.flat[:] = rng.uniform(
+            low=raw_bound[0],
+            high=raw_bound[1],
+            size=n_samples * loss.n_classes,
+        )
+        y_true = np.arange(n_samples).astype(float) % loss.n_classes
+    else:
         raw_prediction = rng.uniform(
             low=raw_bound[0], high=raw_bound[1], size=n_samples
         )
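
For context: this change (and the ones below) replaces numeric comparisons like loss.n_classes <= 2 with a boolean is_multiclass flag. The flag itself is not defined in this diff; a minimal sketch of how it might look on the loss base class (the class name and the None handling are assumptions, not taken from this commit):

    # Hypothetical sketch of the is_multiclass property these tests rely on.
    class BaseLoss:
        n_classes = None  # subclasses such as CategoricalCrossEntropy set this

        @property
        def is_multiclass(self):
            # Multiclass losses use a 2d raw_prediction of shape
            # (n_samples, n_classes); binary and regression losses do not.
            return self.n_classes is not None and self.n_classes >= 3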
@@ -73,14 +81,6 @@ def random_y_true_raw_prediction(
             and loss.interval_y_true.high_inclusive
         ):
             y_true[1 :: (n_samples // 3)] = 1
-    else:
-        raw_prediction = np.empty((n_samples, loss.n_classes))
-        raw_prediction.flat[:] = rng.uniform(
-            low=raw_bound[0],
-            high=raw_bound[1],
-            size=n_samples * loss.n_classes,
-        )
-        y_true = np.arange(n_samples).astype(float) % loss.n_classes
 
     return y_true, raw_prediction
 
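A hedged usage sketch of the helper above, with keyword names taken from the diff; the HalfSquaredError loss and the particular argument values are assumptions:

    # Hypothetical call: draw reproducible random test inputs for a loss.
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=HalfSquaredError(), n_samples=9, seed=42
    )
    # Non-multiclass losses yield two 1d arrays of length n_samples;
    # multiclass losses yield raw_prediction of shape (n_samples, n_classes).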
@@ -105,11 +105,11 @@ def numerical_derivative(func, x, eps):
 def test_loss_boundary(loss):
     """Test interval ranges of y_true and y_pred in losses."""
     # make sure low and high are always within the interval, used for linspace
-    if loss.n_classes is None or loss.n_classes <= 2:
+    if loss.is_multiclass:
+        y_true = np.linspace(0, 9, num=10)
+    else:
         low, high = _inclusive_low_high(loss.interval_y_true)
         y_true = np.linspace(low, high, num=10)
-    else:
-        y_true = np.linspace(0, 9, num=10)
 
     # add boundaries if they are included
     if loss.interval_y_true.low_inclusive:
@@ -120,13 +120,13 @@ def test_loss_boundary(loss):
     assert loss.in_y_true_range(y_true)
 
     low, high = _inclusive_low_high(loss.interval_y_pred)
-    if loss.n_classes is None or loss.n_classes <= 2:
-        y_pred = np.linspace(low, high, num=10)
-    else:
+    if loss.is_multiclass:
         y_pred = np.empty((10, 3))
         y_pred[:, 0] = np.linspace(low, high, num=10)
         y_pred[:, 1] = 0.5 * (1 - y_pred[:, 0])
         y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0])
+    else:
+        y_pred = np.linspace(low, high, num=10)
 
     assert loss.in_y_pred_range(y_pred)
 
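The multiclass branch above builds each row of y_pred as a probability vector by construction: the first column takes values p0, and the remaining mass 1 - p0 is split evenly between the other two columns, so every row sums to p0 + 0.5(1 - p0) + 0.5(1 - p0) = 1. A standalone check of that identity (NumPy only, values assumed):

    import numpy as np

    p0 = np.linspace(0.0, 1.0, num=10)
    y_pred = np.stack([p0, 0.5 * (1 - p0), 0.5 * (1 - p0)], axis=1)
    # Every row sums to exactly 1, i.e. a valid 3-class probability vector.
    assert np.allclose(y_pred.sum(axis=1), 1.0)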
@@ -153,7 +153,7 @@ def test_loss_boundary(loss):
 ]
 # y_pred and y_true do not always have the same domain (valid value range).
 # Hence, we define extra sets of parameters for each of them.
-Y_TRUE_PARAMS = [
+Y_TRUE_PARAMS = [  # type: ignore
     # (loss, [y success], [y fail])
     (HalfPoissonLoss(), [0], []),
     (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []),
@@ -185,7 +185,8 @@ def test_loss_boundary_y_true(loss, y_true_success, y_true_fail):
 
 
 @pytest.mark.parametrize(
-    "loss, y_pred_success, y_pred_fail", Y_COMMON_PARAMS + Y_PRED_PARAMS
+    "loss, y_pred_success, y_pred_fail",
+    Y_COMMON_PARAMS + Y_PRED_PARAMS,  # type: ignore
 )
 def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail):
     """Test boundaries of y_pred for loss functions."""
@@ -211,16 +212,16 @@ def test_loss_dtype(
     float64, and all output arrays are either all float32 or all float64.
     """
     loss = loss()
-    if loss.n_classes <= 2:
-        # generate a y_true in valid range
-        low, high = _inclusive_low_high(loss.interval_y_true, dtype=dtype_in)
-        y_true = np.array([0.5 * (high - low)], dtype=dtype_in)
-        raw_prediction = np.array([0.0], dtype=dtype_in)
-    else:
+    # generate a y_true and raw_prediction in valid range
+    if loss.is_multiclass:
         y_true = np.array([0], dtype=dtype_in)
         raw_prediction = np.full(
             shape=(1, loss.n_classes), fill_value=0.0, dtype=dtype_in
         )
+    else:
+        low, high = _inclusive_low_high(loss.interval_y_true, dtype=dtype_in)
+        y_true = np.array([0.5 * (high - low)], dtype=dtype_in)
+        raw_prediction = np.array([0.0], dtype=dtype_in)
 
     if sample_weight is not None:
         sample_weight = np.array([2.0], dtype=dtype_in)
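
Beyond input construction, the point of test_loss_dtype is the contract named in the docstring: float32 inputs must not silently produce mixed-precision outputs. A hedged illustration of that invariant (the loss.loss method and its keywords are assumed by analogy with the gradient_hessian call shown below, not confirmed by this diff):

    # Hypothetical check: outputs come back in one consistent float dtype.
    loss_values = loss.loss(y_true=y_true, raw_prediction=raw_prediction)
    assert loss_values.dtype in (np.float32, np.float64)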
@@ -251,7 +252,7 @@ def test_loss_dtype(
         gradient=out2,
         n_threads=n_threads,
     )
-    if out1 is not None and loss.n_classes >= 3:
+    if out1 is not None and loss.is_multiclass:
         out1 = np.empty_like(raw_prediction, dtype=dtype_out)
     loss.gradient_hessian(
         y_true=y_true,
@@ -350,7 +351,7 @@ def test_loss_same_as_C_functions(loss, sample_weight):
 def test_loss_gradients_are_the_same(loss, sample_weight):
     """Test that loss and gradient are the same across different functions.
 
-    Also test that output arguments contain correct result.
+    Also test that output arguments contain correct results.
     """
     y_true, raw_prediction = random_y_true_raw_prediction(
         loss=loss,
@@ -410,7 +411,7 @@ def test_loss_gradients_are_the_same(loss, sample_weight):
     assert np.shares_memory(g3, out_g3)
 
     if hasattr(loss, "gradient_proba"):
-        assert loss.n_classes >= 3  # only for CategoricalCrossEntropy
+        assert loss.is_multiclass  # only for CategoricalCrossEntropy
         out_g4 = np.empty_like(raw_prediction)
         out_proba = np.empty_like(raw_prediction)
         g4, proba = loss.gradient_proba(