CLN: remove redundant code related to Styler #39884

Merged (3 commits) on Feb 19, 2021
54 changes: 0 additions & 54 deletions pandas/core/indexing.py
@@ -2403,57 +2403,3 @@ def need_slice(obj) -> bool:
or obj.stop is not None
or (obj.step is not None and obj.step != 1)
)


def non_reducing_slice(slice_):
"""
Ensure that a slice doesn't reduce to a Series or Scalar.

Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = (ABCSeries, np.ndarray, Index, list, str)
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]

def pred(part) -> bool:
"""
Returns
-------
bool
True if slice does *not* reduce,
False if `part` is a tuple.
"""
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
if isinstance(part, tuple):
# GH#39421 check for sub-slice:
return any((isinstance(s, slice) or is_list_like(s)) for s in part)
else:
return isinstance(part, slice) or is_list_like(part)

if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)


def maybe_numeric_slice(df, slice_, include_bool: bool = False):
"""
Want nice defaults for background_gradient that don't break
with non-numeric data. But if slice_ is passed go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
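
The removed `maybe_numeric_slice` helper is not relocated: its two call sites in `pandas/io/formats/style.py` now inline the numeric-column default with `select_dtypes` (see the `background_gradient` and `bar` hunks below). A minimal sketch of the equivalent behavior, using an assumed example frame:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"], "C": [True, False]})

# Old default, maybe_numeric_slice(df, None): keep only numeric columns.
subset = pd.IndexSlice[:, df.select_dtypes(include=[np.number]).columns]
print(df.loc[subset].columns.tolist())  # ['A']

# Old include_bool=True variant: also keep boolean columns.
subset = pd.IndexSlice[:, df.select_dtypes(include=[np.number, bool]).columns]
print(df.loc[subset].columns.tolist())  # ['A', 'C']
```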
65 changes: 53 additions & 12 deletions pandas/io/formats/style.py
@@ -37,6 +37,7 @@
from pandas.util._decorators import doc

from pandas.core.dtypes.common import is_float
from pandas.core.dtypes.generic import ABCSeries

import pandas as pd
from pandas.api.types import (
@@ -47,10 +48,7 @@
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.indexing import (
maybe_numeric_slice,
non_reducing_slice,
)
from pandas.core.indexes.api import Index

jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.")
CSSSequence = Sequence[Tuple[str, Union[str, int, float]]]
@@ -625,7 +623,7 @@ def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> Styler
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = non_reducing_slice(subset)
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns

@@ -851,7 +849,7 @@ def _apply(
**kwargs,
) -> Styler:
subset = slice(None) if subset is None else subset
subset = non_reducing_slice(subset)
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis, result_type="expand", **kwargs)
@@ -954,7 +952,7 @@ def _applymap(self, func: Callable, subset=None, **kwargs) -> Styler:
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = non_reducing_slice(subset)
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
@@ -1304,7 +1302,7 @@ def hide_columns(self, subset) -> Styler:
-------
self : Styler
"""
subset = non_reducing_slice(subset)
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
@@ -1379,8 +1377,9 @@ def background_gradient(
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing.
"""
subset = maybe_numeric_slice(self.data, subset)
subset = non_reducing_slice(subset)
if subset is None:
subset = self.data.select_dtypes(include=np.number).columns

self.apply(
self._background_gradient,
cmap=cmap,
@@ -1613,8 +1612,9 @@ def bar(
"(eg: color=['#d65f5f', '#5fba7d'])"
)

subset = maybe_numeric_slice(self.data, subset)
subset = non_reducing_slice(subset)
if subset is None:
subset = self.data.select_dtypes(include=np.number).columns

self.apply(
self._bar,
subset=subset,
@@ -2088,3 +2088,44 @@ def _maybe_convert_css_to_tuples(style: CSSProperties) -> CSSSequence:
f"for example 'attr: val;'. '{style}' was given."
)
return style


def _non_reducing_slice(slice_):
"""
Ensure that a slice doesn't reduce to a Series or Scalar.

Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = (ABCSeries, np.ndarray, Index, list, str)
if isinstance(slice_, kinds):
slice_ = pd.IndexSlice[:, slice_]

def pred(part) -> bool:
"""
Returns
-------
bool
True if slice does *not* reduce,
False if `part` is a tuple.
"""
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
if isinstance(part, tuple):
# GH#39421 check for sub-slice:
return any((isinstance(s, slice) or is_list_like(s)) for s in part)
else:
return isinstance(part, slice) or is_list_like(part)

if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
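
A short usage sketch, not part of this diff, of the relocated private helper: whatever shape of `subset` a caller passes, `df.loc[_non_reducing_slice(subset)]` stays a DataFrame. The example frame is an assumption, and importing from `pandas.io.formats.style` requires jinja2 since the module belongs to Styler:

```python
import pandas as pd
from pandas.io.formats.style import _non_reducing_slice  # private helper; requires jinja2

df = pd.DataFrame([[0, 1], [2, 3]], columns=["A", "B"])

for subset in ["A", ["A"], 1, pd.IndexSlice[1, "A"]]:
    widened = _non_reducing_slice(subset)
    # Scalars and 1-d labels are wrapped so that .loc never reduces to a Series or scalar.
    assert isinstance(df.loc[widened], pd.DataFrame)
```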
54 changes: 0 additions & 54 deletions pandas/tests/indexing/multiindex/test_slice.py
@@ -12,7 +12,6 @@
Timestamp,
)
import pandas._testing as tm
from pandas.core.indexing import non_reducing_slice
from pandas.tests.indexing.common import _mklbl


@@ -769,59 +768,6 @@ def test_int_series_slicing(self, multiindex_year_month_day_dataframe_random_dat
expected = ymd.reindex(s.index[5:])
tm.assert_frame_equal(result, expected)

def test_non_reducing_slice_on_multiindex(self):
# GH 19861
dic = {
("a", "d"): [1, 4],
("a", "c"): [2, 3],
("b", "c"): [3, 2],
("b", "d"): [4, 1],
}
df = DataFrame(dic, index=[0, 1])
idx = pd.IndexSlice
slice_ = idx[:, idx["b", "d"]]
tslice_ = non_reducing_slice(slice_)

result = df.loc[tslice_]
expected = DataFrame({("b", "d"): [4, 1]})
tm.assert_frame_equal(result, expected)

@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:, :],
# check cols
pd.IndexSlice[:, pd.IndexSlice[["a"]]], # inferred deeper need list
pd.IndexSlice[:, pd.IndexSlice[["a"], ["c"]]], # inferred deeper need list
pd.IndexSlice[:, pd.IndexSlice["a", "c", :]],
pd.IndexSlice[:, pd.IndexSlice["a", :, "e"]],
pd.IndexSlice[:, pd.IndexSlice[:, "c", "e"]],
pd.IndexSlice[:, pd.IndexSlice["a", ["c", "d"], :]], # check list
pd.IndexSlice[:, pd.IndexSlice["a", ["c", "d", "-"], :]], # allow missing
pd.IndexSlice[:, pd.IndexSlice["a", ["c", "d", "-"], "e"]], # no slice
# check rows
pd.IndexSlice[pd.IndexSlice[["U"]], :], # inferred deeper need list
pd.IndexSlice[pd.IndexSlice[["U"], ["W"]], :], # inferred deeper need list
pd.IndexSlice[pd.IndexSlice["U", "W", :], :],
pd.IndexSlice[pd.IndexSlice["U", :, "Y"], :],
pd.IndexSlice[pd.IndexSlice[:, "W", "Y"], :],
pd.IndexSlice[pd.IndexSlice[:, "W", ["Y", "Z"]], :], # check list
pd.IndexSlice[pd.IndexSlice[:, "W", ["Y", "Z", "-"]], :], # allow missing
pd.IndexSlice[pd.IndexSlice["U", "W", ["Y", "Z", "-"]], :], # no slice
# check simultaneous
pd.IndexSlice[pd.IndexSlice[:, "W", "Y"], pd.IndexSlice["a", "c", :]],
],
)
def test_non_reducing_multi_slice_on_multiindex(self, slice_):
# GH 33562
cols = pd.MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]])
idxs = pd.MultiIndex.from_product([["U", "V"], ["W", "X"], ["Y", "Z"]])
df = DataFrame(np.arange(64).reshape(8, 8), columns=cols, index=idxs)

expected = df.loc[slice_]
result = df.loc[non_reducing_slice(slice_)]
tm.assert_frame_equal(result, expected)

def test_loc_slice_negative_stepsize(self):
# GH#38071
mi = MultiIndex.from_product([["a", "b"], [0, 1]])
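
The two MultiIndex tests removed above (GH 19861, GH 33562) checked the non-reducing behavior on hierarchical indexes; a condensed sketch of the same property against the relocated private helper, with an assumed frame:

```python
import pandas as pd
from pandas.io.formats.style import _non_reducing_slice  # private helper added by this PR

cols = pd.MultiIndex.from_product([["a", "b"], ["c", "d"]])
df = pd.DataFrame([[1, 2, 3, 4], [4, 3, 2, 1]], columns=cols)

# Selecting a single (level-0, level-1) column pair would normally reduce to a Series;
# the helper wraps the tuple in a list so .loc keeps a one-column DataFrame.
subset = pd.IndexSlice[:, pd.IndexSlice["b", "d"]]
result = df.loc[_non_reducing_slice(subset)]
assert isinstance(result, pd.DataFrame)
assert list(result.columns) == [("b", "d")]
```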
51 changes: 0 additions & 51 deletions pandas/tests/indexing/test_indexing.py
@@ -23,10 +23,6 @@
timedelta_range,
)
import pandas._testing as tm
from pandas.core.indexing import (
maybe_numeric_slice,
non_reducing_slice,
)
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj

@@ -794,53 +790,6 @@ def test_range_in_series_indexing(self, size):
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))

@pytest.mark.parametrize(
"slc",
[
pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
Series([0, 1]),
],
)
def test_non_reducing_slice(self, slc):
df = DataFrame([[0, 1], [2, 3]])

tslice_ = non_reducing_slice(slc)
assert isinstance(df.loc[tslice_], DataFrame)

@pytest.mark.parametrize("box", [list, Series, np.array])
def test_list_slice(self, box):
# like dataframe getitem
subset = box(["A"])

df = DataFrame({"A": [1, 2], "B": [3, 4]}, index=["A", "B"])
expected = pd.IndexSlice[:, ["A"]]

result = non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])

def test_maybe_numeric_slice(self):
df = DataFrame({"A": [1, 2], "B": ["c", "d"], "C": [True, False]})
result = maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ["A"]]
assert result == expected

result = maybe_numeric_slice(df, None, include_bool=True)
expected = pd.IndexSlice[:, ["A", "C"]]
assert all(result[1] == expected[1])
result = maybe_numeric_slice(df, [1])
expected = [1]
assert result == expected

def test_partial_boolean_frame_indexing(self):
# GH 17170
df = DataFrame(