-
Notifications
You must be signed in to change notification settings - Fork 302
Cleanup of simple_imputer #346
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 3 commits
61b1a29
bbabad8
c92d039
60b9194
7a3e792
5490604
e790e71
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,9 +1,7 @@ | ||
from typing import Any, Dict, List, Optional, Union | ||
|
||
from ConfigSpace.configuration_space import ConfigurationSpace | ||
from ConfigSpace.hyperparameters import ( | ||
CategoricalHyperparameter | ||
) | ||
from ConfigSpace.hyperparameters import CategoricalHyperparameter | ||
|
||
import numpy as np | ||
|
||
|
@@ -15,92 +13,142 @@ | |
|
||
|
||
class SimpleImputer(BaseImputer): | ||
""" | ||
Impute missing values for categorical columns with '!missing!' | ||
(In case of numpy data, the constant value is set to -1, under | ||
the assumption that categorical data is fit with an Ordinal Scaler) | ||
"""An imputer for categorical and numerical columns | ||
Impute missing values for categorical columns with 'constant_!missing!' | ||
Note: | ||
In case of numpy data, the constant value is set to -1, under the assumption | ||
that categorical data is fit with an Ordinal Scaler. | ||
""" | ||
|
||
def __init__(self, | ||
random_state: Optional[Union[np.random.RandomState, int]] = None, | ||
numerical_strategy: str = 'mean', | ||
categorical_strategy: str = 'most_frequent'): | ||
def __init__( | ||
self, | ||
random_state: Optional[Union[np.random.RandomState, int]] = None, | ||
numerical_strategy: str = 'mean', | ||
categorical_strategy: str = 'most_frequent' | ||
): | ||
""" | ||
Note: | ||
Using 'constant' defaults to fill_value of 0 where 'constant_!missing!' | ||
uses a fill_value of -1. This behaviour should probably be fixed. | ||
eddiebergman marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
Args: | ||
random_state (Optional[Union[np.random.RandomState, int]]): | ||
eddiebergman marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
The random state to use for the imputer. | ||
numerical_strategy (str: default='mean'): | ||
The strategy to use for imputing numerical columns. | ||
Can be one of ['mean', 'median', 'most_frequent', 'constant_zero'] | ||
categorical_strategy (str: default='most_frequent') | ||
The strategy to use for imputing categorical columns. | ||
Can be one of ['most_frequent', 'constant_!missing!'] | ||
|
||
""" | ||
super().__init__() | ||
self.random_state = random_state | ||
self.numerical_strategy = numerical_strategy | ||
self.categorical_strategy = categorical_strategy | ||
|
||
def fit(self, X: Dict[str, Any], y: Any = None) -> BaseImputer: | ||
""" | ||
The fit function calls the fit function of the underlying model | ||
and returns the transformed array. | ||
def fit(self, X: Dict[str, Any], y: Optional[Any] = None) -> BaseImputer: | ||
""" Fits the underlying model and returns the transformed array. | ||
Args: | ||
X (np.ndarray): input features | ||
y (Optional[np.ndarray]): input labels | ||
X (np.ndarray): | ||
The input features to fit on | ||
y (Optional[np.ndarray]): | ||
The labels for the input features `X` | ||
Returns: | ||
instance of self | ||
SimpleImputer: | ||
returns self | ||
""" | ||
self.check_requirements(X, y) | ||
categorical_columns = X['dataset_properties']['categorical_columns'] \ | ||
if isinstance(X['dataset_properties']['categorical_columns'], List) else [] | ||
if len(categorical_columns) != 0: | ||
|
||
# Choose an imputer for any categorical columns | ||
categorical_columns = X['dataset_properties']['categorical_columns'] | ||
|
||
if isinstance(categorical_columns, List) and len(categorical_columns) != 0: | ||
if self.categorical_strategy == 'constant_!missing!': | ||
self.preprocessor['categorical'] = SklearnSimpleImputer(strategy='constant', | ||
# Train data is numpy | ||
# as of this point, where | ||
# Ordinal Encoding is using | ||
# for categorical. Only | ||
# Numbers are allowed | ||
# fill_value='!missing!', | ||
fill_value=-1, | ||
copy=False) | ||
# Train data is numpy as of this point, where an Ordinal Encoding is used | ||
# for categoricals. Only Numbers are allowed for `fill_value` | ||
imputer = SklearnSimpleImputer(strategy='constant', fill_value=-1, copy=False) | ||
self.preprocessor['categorical'] = imputer | ||
else: | ||
self.preprocessor['categorical'] = SklearnSimpleImputer(strategy=self.categorical_strategy, | ||
copy=False) | ||
numerical_columns = X['dataset_properties']['numerical_columns'] \ | ||
if isinstance(X['dataset_properties']['numerical_columns'], List) else [] | ||
if len(numerical_columns) != 0: | ||
imputer = SklearnSimpleImputer(strategy=self.categorical_strategy, copy=False) | ||
self.preprocessor['categorical'] = imputer | ||
|
||
# Choose an imputer for any numerical columns | ||
numerical_columns = X['dataset_properties']['numerical_columns'] | ||
|
||
if isinstance(numerical_columns, List) and len(numerical_columns) > 0: | ||
if self.numerical_strategy == 'constant_zero': | ||
self.preprocessor['numerical'] = SklearnSimpleImputer(strategy='constant', | ||
fill_value=0, | ||
copy=False) | ||
imputer = SklearnSimpleImputer(strategy='constant', fill_value=0, copy=False) | ||
self.preprocessor['numerical'] = imputer | ||
else: | ||
self.preprocessor['numerical'] = SklearnSimpleImputer(strategy=self.numerical_strategy, copy=False) | ||
imputer = SklearnSimpleImputer(strategy=self.numerical_strategy, copy=False) | ||
self.preprocessor['numerical'] = imputer | ||
|
||
return self | ||
|
||
@staticmethod | ||
def get_hyperparameter_search_space( | ||
dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None, | ||
numerical_strategy: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter='numerical_strategy', | ||
value_range=("mean", "median", | ||
"most_frequent", | ||
"constant_zero"), | ||
default_value="mean", | ||
), | ||
numerical_strategy: HyperparameterSearchSpace = HyperparameterSearchSpace( | ||
hyperparameter='numerical_strategy', | ||
value_range=("mean", "median", "most_frequent", "constant_zero"), | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It would be better if we can use
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I can do that, you do however require to update these whenever sklearn updates them as well as that is where they are forwarded to. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I can see why you want to do that but it makes the code that uses it a little less pretty. For example, it's hard to see where the error with the follow code is: numerical_strategy: HyperparameterSearchSpace = HyperparameterSearchSpace(
hyperparameter='numerical_strategy',
value_range=NumericalImputerChoice.get_choices(),
default_value=NumericalImputerChoice.mean,
), It should be
... where as we would like
This is more readily achievable with a I will implement the Enum version and you can change it if you like or leave it as is. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Another clean'ish solution is just ditch the enum part. class Choices:
x: str = "x"
y: str = "y"
assert Choices.x == "x" The type is still a string, meaning it's easy to use, people don't need to know about the existence of this class to use it, they can still pass a string. It also allows for the internal code to use the class, where we do know about it. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. As another note, it also makes parameters extremely long, it's 101 characters which is over the character count limit of the checker: def __init__(
self,
random_state: Optional[np.random.RandomState] = None,
numerical_strategy: NumericalImputerChoice = NumericalImputerChoice.mean.value,
categorical_strategy: CategoricalImputerChoice = CategoricalImputerChoice.most_frequent.value
): There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I've not changed it for now, there's too many decisions to make that I think can be addressed based on how you would like it done yourself. I've changed it back to strings. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hey, thanks for the insights. Yeah, I think such changes should be a part of a separate PR as this PR is meant to clean up the messy statements in this file. Also, it would be better if the hyperparameter strings in all the components are consistent so it would require changing a lot of files which I think is beyond the scope of this PR. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We have the same thing, there is def f(s: Literal['yes', 'no'])
...
f("yes") # Type Error
param: Literal = "yes"
f(param) # Okay There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. All the problems come from the fact that we still need to follow python3.7.
Yeah, I agree with you. I am just uncomfortable using software that assumes that users google string choices (typically sklearn). (just a question, but) the better solution will be something like this: class NumericalImputerChoices(NamedTuple):
mean = "mean"
median = "median"
num_imputer_choices = NumericalImputerChoices() and use Btw, you do not need to put There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. also, you can add |
||
default_value="mean", | ||
), | ||
categorical_strategy: HyperparameterSearchSpace = HyperparameterSearchSpace( | ||
hyperparameter='categorical_strategy', | ||
value_range=("most_frequent", | ||
"constant_!missing!"), | ||
default_value="most_frequent") | ||
value_range=("most_frequent", "constant_!missing!"), | ||
default_value="most_frequent" | ||
) | ||
) -> ConfigurationSpace: | ||
"""Get the hyperparameter search space for the SimpleImputer | ||
Args: | ||
dataset_properties (Optional[Dict[str, BaseDatasetPropertiesType]]) | ||
Properties that describe the dataset | ||
Note: Not actually Optional, just adhering to its supertype | ||
numerical_strategy (HyperparameterSearchSpace: default = ...) | ||
The strategy to use for numerical imputation | ||
categorical_strategy (HyperparameterSearchSpace: default = ...) | ||
The strategy to use for categorical imputation | ||
Returns: | ||
ConfigurationSpace | ||
The space of possible configurations for a SimpleImputer with the given | ||
`dataset_properties` | ||
""" | ||
cs = ConfigurationSpace() | ||
assert dataset_properties is not None, "To create hyperparameter search space" \ | ||
", dataset_properties should not be None" | ||
if len(dataset_properties['numerical_columns']) \ | ||
if isinstance(dataset_properties['numerical_columns'], List) else 0 != 0: | ||
|
||
if dataset_properties is None: | ||
raise ValueError("SimpleImputer requires `dataset_properties` for generating" | ||
eddiebergman marked this conversation as resolved.
Show resolved
Hide resolved
|
||
" a search space.") | ||
|
||
if ( | ||
isinstance(dataset_properties['numerical_columns'], List) | ||
and len(dataset_properties['numerical_columns']) != 0 | ||
): | ||
add_hyperparameter(cs, numerical_strategy, CategoricalHyperparameter) | ||
|
||
if len(dataset_properties['categorical_columns']) \ | ||
if isinstance(dataset_properties['categorical_columns'], List) else 0 != 0: | ||
if ( | ||
isinstance(dataset_properties['categorical_columns'], List) | ||
and len(dataset_properties['categorical_columns']) | ||
): | ||
add_hyperparameter(cs, categorical_strategy, CategoricalHyperparameter) | ||
|
||
return cs | ||
|
||
@staticmethod | ||
def get_properties(dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None | ||
) -> Dict[str, Union[str, bool]]: | ||
def get_properties( | ||
dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None | ||
) -> Dict[str, Union[str, bool]]: | ||
"""Get the properties of the SimpleImputer class and what it can handle | ||
Returns: | ||
Dict[str, Union[str, bool]]: | ||
A dict from property names to values | ||
""" | ||
return { | ||
'shortname': 'SimpleImputer', | ||
'name': 'Simple Imputer', | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We removed integer type from
random_state
, so it must beOptional[np.random.RandomState]
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Okay, will do. As an fyi, this can cause different output if you run the same function twice on the same object. The random state produces a sequence of numbers.
For example, if you create a single RandomState object and pass it to every object that requires a random_state, you will get different output depending on the order in which objects use that random_state. On the flip-side, if you use an int, they are independent of each other and so it doesn't matter which order objects use it.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yeah, that is also true. My explanation was not sufficient, but we, in reality, decided to use
seed
forint
andrandom_state
fornp.random.RandomState
.So it is a very good decision if we switch to
seed
instead ofrandom_state
in the future.There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We kept
random_state
as the arg name and allow for both, internally we just pass theseed
argument given at construction of anAutoSklearnClassifier
so that internally it's an int passed throughout.We follow sklearn in principle so we copy their expected behaviour.