Skip to content

Commit 827dc81

Browse files
committed
Pretty printing the show_models() dictionary
1 parent 389899f commit 827dc81

13 files changed

+36
-20
lines changed

examples/20_basic/example_classification.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
The following example shows how to fit a simple classification model with
88
*auto-sklearn*.
99
"""
10+
from pprint import pprint
11+
1012
import sklearn.datasets
1113
import sklearn.metrics
1214

@@ -42,7 +44,7 @@
4244
# Print the final ensemble constructed by auto-sklearn
4345
# ====================================================
4446

45-
print(automl.show_models())
47+
pprint(automl.show_models(), indent=4)
4648

4749
###########################################################################
4850
# Get the Score of the final ensemble

examples/20_basic/example_multilabel_classification.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
`here <https://scikit-learn.org/stable/modules/multiclass.html>`_.
99
"""
1010
import numpy as np
11+
from pprint import pprint
1112

1213
import sklearn.datasets
1314
import sklearn.metrics
@@ -65,7 +66,7 @@
6566
# Print the final ensemble constructed by auto-sklearn
6667
# ====================================================
6768

68-
print(automl.show_models())
69+
pprint(automl.show_models(), indent=4)
6970

7071
############################################################################
7172
# Print statistics about the auto-sklearn run

examples/20_basic/example_multioutput_regression.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
*auto-sklearn*.
99
"""
1010
import numpy as numpy
11+
from pprint import pprint
1112

1213
from sklearn.datasets import make_regression
1314
from sklearn.metrics import r2_score
@@ -46,7 +47,7 @@
4647
# Print the final ensemble constructed by auto-sklearn
4748
# ====================================================
4849

49-
print(automl.show_models())
50+
pprint(automl.show_models(), indent=4)
5051

5152
###########################################################################
5253
# Get the Score of the final ensemble

examples/20_basic/example_regression.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
The following example shows how to fit a simple regression model with
88
*auto-sklearn*.
99
"""
10+
from pprint import pprint
11+
1012
import sklearn.datasets
1113
import sklearn.metrics
1214

@@ -43,7 +45,7 @@
4345
# Print the final ensemble constructed by auto-sklearn
4446
# ====================================================
4547

46-
print(automl.show_models())
48+
pprint(automl.show_models(), indent=4)
4749

4850
#####################################
4951
# Get the Score of the final ensemble

examples/40_advanced/example_get_pipeline_components.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414
the sklearn models. This example illustrates how to interact
1515
with the sklearn components directly, in this case a PCA preprocessor.
1616
"""
17+
from pprint import pprint
18+
1719
import sklearn.datasets
1820
import sklearn.metrics
1921

@@ -63,7 +65,7 @@
6365
# to construct ensembles in a post-hoc fashion. The ensemble is a linear
6466
# weighting of all models constructed during the hyperparameter optimization.
6567
# This prints the final ensemble. It is a dictionary where ``model_id`` of
66-
# each model is a key, and its value is a dictionary containing information
68+
# each model is a key, and its value is a dictionary containing information
6769
# of that model. A model's dict contains its ``'model_id'``, ``'rank'``,
6870
# ``'cost'``, ``'ensemble_weight'``, and the model itself. The model is
6971
# given by the ``'data_preprocessor'``, ``'feature_preprocessor'``,
@@ -72,7 +74,7 @@
7274
# model is stored in the ``'estimators'`` list in the dict, along with the
7375
# ``'voting_model'``.
7476

75-
print(automl.show_models())
77+
pprint(automl.show_models(), indent=4)
7678

7779
###########################################################################
7880
# Report statistics about the search

examples/40_advanced/example_interpretable_models.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
The following example shows how to inspect the models which *auto-sklearn*
88
optimizes over and how to restrict them to an interpretable subset.
99
"""
10+
from pprint import pprint
11+
1012
import autosklearn.classification
1113
import sklearn.datasets
1214
import sklearn.metrics
@@ -70,7 +72,7 @@
7072
# Print the final ensemble constructed by auto-sklearn
7173
# ====================================================
7274

73-
print(automl.show_models())
75+
pprint(automl.show_models(), indent=4)
7476

7577
###########################################################################
7678
# Get the Score of the final ensemble

examples/60_search/example_random_search.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
as yet another alternative optimization strategy.
1313
Both examples are intended to show how the optimization strategy in *auto-sklearn* can be adapted.
1414
""" # noqa (links are too long)
15+
from pprint import pprint
1516

1617
import sklearn.model_selection
1718
import sklearn.datasets
@@ -75,7 +76,7 @@ def get_roar_object_callback(
7576
print('#' * 80)
7677
print('Results for ROAR.')
7778
# Print the final ensemble constructed by auto-sklearn via ROAR.
78-
print(automl.show_models())
79+
pprint(automl.show_models(), indent=4)
7980
predictions = automl.predict(X_test)
8081
# Print statistics about the auto-sklearn run such as number of
8182
# iterations, number of models failed with a time out.
@@ -129,7 +130,7 @@ def get_random_search_object_callback(
129130
print('Results for random search.')
130131

131132
# Print the final ensemble constructed by auto-sklearn via random search.
132-
print(automl.show_models())
133+
pprint(automl.show_models(), indent=4)
133134

134135
# Print statistics about the auto-sklearn run such as number of
135136
# iterations, number of models failed with a time out.

examples/60_search/example_sequential.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
sequentially. The example below shows how to first fit the models and build the
99
ensembles afterwards.
1010
"""
11+
from pprint import pprint
1112

1213
import sklearn.model_selection
1314
import sklearn.datasets
@@ -48,7 +49,7 @@
4849
# Print the final ensemble constructed by auto-sklearn
4950
# ====================================================
5051

51-
print(automl.show_models())
52+
pprint(automl.show_models(), indent=4)
5253

5354
############################################################################
5455
# Get the Score of the final ensemble

examples/60_search/example_successive_halving.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
To get the BOHB algorithm, simply import Hyperband and use it as the intensification strategy.
1515
1616
""" # noqa (links are too long)
17-
17+
from pprint import pprint
1818

1919
import sklearn.model_selection
2020
import sklearn.datasets
@@ -110,7 +110,7 @@ def get_smac_object(
110110
)
111111
automl.fit(X_train, y_train, dataset_name='breast_cancer')
112112

113-
print(automl.show_models())
113+
pprint(automl.show_models(), indent=4)
114114
predictions = automl.predict(X_test)
115115
# Print statistics about the auto-sklearn run such as number of
116116
# iterations, number of models failed with a time out.
@@ -143,7 +143,7 @@ def get_smac_object(
143143
automl.fit(X_train, y_train, dataset_name='breast_cancer')
144144

145145
# Print the final ensemble constructed by auto-sklearn.
146-
print(automl.show_models())
146+
pprint(automl.show_models(), indent=4)
147147
automl.refit(X_train, y_train)
148148
predictions = automl.predict(X_test)
149149
# Print statistics about the auto-sklearn run such as number of
@@ -177,7 +177,7 @@ def get_smac_object(
177177
automl.fit(X_train, y_train, dataset_name='breast_cancer')
178178

179179
# Print the final ensemble constructed by auto-sklearn.
180-
print(automl.show_models())
180+
pprint(automl.show_models(), indent=4)
181181
automl.refit(X_train, y_train)
182182
predictions = automl.predict(X_test)
183183
# Print statistics about the auto-sklearn run such as number of
@@ -208,7 +208,7 @@ def get_smac_object(
208208
automl.fit(X_train, y_train, dataset_name='breast_cancer')
209209

210210
# Print the final ensemble constructed by auto-sklearn.
211-
print(automl.show_models())
211+
pprint(automl.show_models(), indent=4)
212212
predictions = automl.predict(X_test)
213213
# Print statistics about the auto-sklearn run such as number of
214214
# iterations, number of models failed with a time out.
@@ -245,7 +245,7 @@ def get_smac_object(
245245
automl.fit(X_train, y_train, dataset_name='breast_cancer')
246246

247247
# Print the final ensemble constructed by auto-sklearn.
248-
print(automl.show_models())
248+
pprint(automl.show_models(), indent=4)
249249
predictions = automl.predict(X_test)
250250
# Print statistics about the auto-sklearn run such as number of
251251
# iterations, number of models failed with a time out.

examples/80_extending/example_extending_classification.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
The following example demonstrates how to create a new classification
77
component for using in auto-sklearn.
88
"""
9+
from pprint import pprint
910

1011
from ConfigSpace.configuration_space import ConfigurationSpace
1112
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
@@ -149,4 +150,4 @@ def get_hyperparameter_search_space(dataset_properties=None):
149150

150151
y_pred = clf.predict(X_test)
151152
print("accuracy: ", sklearn.metrics.accuracy_score(y_pred, y_test))
152-
print(clf.show_models())
153+
pprint(clf.show_models(), indent=4)

examples/80_extending/example_extending_data_preprocessor.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
66
The following example demonstrates how to turn off data preprocessing step in auto-sklearn.
77
"""
8+
from pprint import pprint
89

910
import autosklearn.classification
1011
import autosklearn.pipeline.components.data_preprocessing
@@ -89,4 +90,4 @@ def get_hyperparameter_search_space(dataset_properties=None):
8990

9091
y_pred = clf.predict(X_test)
9192
print("accuracy: ", sklearn.metrics.accuracy_score(y_pred, y_test))
92-
print(clf.show_models())
93+
pprint(clf.show_models(), indent=4)

examples/80_extending/example_extending_preprocessor.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
discriminant analysis (LDA) algorithm from sklearn and use it as a preprocessor
88
in auto-sklearn.
99
"""
10+
from pprint import pprint
1011

1112
from ConfigSpace.configuration_space import ConfigurationSpace
1213
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, CategoricalHyperparameter
@@ -130,4 +131,4 @@ def get_hyperparameter_search_space(dataset_properties=None):
130131

131132
y_pred = clf.predict(X_test)
132133
print("accuracy: ", sklearn.metrics.accuracy_score(y_pred, y_test))
133-
print(clf.show_models())
134+
pprint(clf.show_models(), indent=4)

examples/80_extending/example_extending_regression.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
The following example demonstrates how to create a new regression
77
component for using in auto-sklearn.
88
"""
9+
from pprint import pprint
910

1011
from ConfigSpace.configuration_space import ConfigurationSpace
1112
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
@@ -137,4 +138,4 @@ def get_hyperparameter_search_space(dataset_properties=None):
137138
# =====================================
138139
y_pred = reg.predict(X_test)
139140
print("r2 score: ", sklearn.metrics.r2_score(y_pred, y_test))
140-
print(reg.show_models())
141+
pprint(reg.show_models(), indent=4)

0 commit comments

Comments
 (0)