Skip to content

STY: Enable B904 #56941

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jan 22, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions asv_bench/benchmarks/algos/isin.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,8 @@ def setup(self, dtype):
self.series = Series(
Index([f"i-{i}" for i in range(N)], dtype=object), dtype=dtype
)
except ImportError:
raise NotImplementedError
except ImportError as err:
raise NotImplementedError from err
self.values = list(self.series[:2])

else:
Expand Down
4 changes: 2 additions & 2 deletions asv_bench/benchmarks/array.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,8 +76,8 @@ class ArrowStringArray:
def setup(self, multiple_chunks):
try:
import pyarrow as pa
except ImportError:
raise NotImplementedError
except ImportError as err:
raise NotImplementedError from err
strings = np.array([str(i) for i in range(10_000)], dtype=object)
if multiple_chunks:
chunks = [strings[i : i + 100] for i in range(0, len(strings), 100)]
Expand Down
4 changes: 2 additions & 2 deletions asv_bench/benchmarks/strings.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ def setup(self, dtype):
self.s = Series(
Index([f"i-{i}" for i in range(10000)], dtype=object), dtype=dtype
)
except ImportError:
raise NotImplementedError
except ImportError as err:
raise NotImplementedError from err


class Construction:
Expand Down
4 changes: 2 additions & 2 deletions pandas/compat/_optional.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,9 +161,9 @@ def import_optional_dependency(
)
try:
module = importlib.import_module(name)
except ImportError:
except ImportError as err:
if errors == "raise":
raise ImportError(msg)
raise ImportError(msg) from err
return None

# Handle submodules: if we have submodule, grab parent module from sys.modules
Expand Down
6 changes: 4 additions & 2 deletions pandas/core/array_algos/datetimelike_accumulations.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,10 @@ def _cum_func(
np.cumsum: 0,
np.minimum.accumulate: np.iinfo(np.int64).max,
}[func]
except KeyError:
raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray")
except KeyError as err:
raise ValueError(
f"No accumulation for {func} implemented on BaseMaskedArray"
) from err

mask = isna(values)
y = values.view("i8")
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/array_algos/masked_accumulations.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,10 +60,10 @@ def _cum_func(
np.cumsum: 0,
np.minimum.accumulate: dtype_info.max,
}[func]
except KeyError:
except KeyError as err:
raise NotImplementedError(
f"No accumulation for {func} implemented on BaseMaskedArray"
)
) from err

values[mask] = fill_value

Expand Down
4 changes: 2 additions & 2 deletions pandas/core/arrays/categorical.py
Original file line number Diff line number Diff line change
Expand Up @@ -597,9 +597,9 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
except (
TypeError, # downstream error msg for CategoricalIndex is misleading
ValueError,
):
) as err:
msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}"
raise ValueError(msg)
raise ValueError(msg) from err

result = take_nd(
new_cats, ensure_platform_int(self._codes), fill_value=fill_value
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/arrays/datetimelike.py
Original file line number Diff line number Diff line change
Expand Up @@ -524,9 +524,9 @@ def _validate_comparison_value(self, other):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except (ValueError, IncompatibleFrequency):
except (ValueError, IncompatibleFrequency) as err:
# failed to parse as Timestamp/Timedelta/Period
raise InvalidComparison(other)
raise InvalidComparison(other) from err

if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
Expand Down Expand Up @@ -664,11 +664,11 @@ def _validate_listlike(self, value, allow_object: bool = False):
if lib.infer_dtype(value) in self._infer_matches:
try:
value = type(self)._from_sequence(value)
except (ValueError, TypeError):
except (ValueError, TypeError) as err:
if allow_object:
return value
msg = self._validation_error_message(value, True)
raise TypeError(msg)
raise TypeError(msg) from err

# Do type inference if necessary up front (after unpacking
# NumpyExtensionArray)
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/arrays/period.py
Original file line number Diff line number Diff line change
Expand Up @@ -1188,10 +1188,10 @@ def dt64arr_to_periodarr(
freq = Period._maybe_convert_freq(freq)
try:
base = freq._period_dtype_code
except (AttributeError, TypeError):
except (AttributeError, TypeError) as err:
# AttributeError: _period_dtype_code might not exist
# TypeError: _period_dtype_code might intentionally raise
raise TypeError(f"{freq.name} is not supported as period frequency")
raise TypeError(f"{freq.name} is not supported as period frequency") from err
return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq


Expand Down
4 changes: 2 additions & 2 deletions pandas/core/dtypes/cast.py
Original file line number Diff line number Diff line change
Expand Up @@ -1817,8 +1817,8 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any:
# TODO: general-case for EAs?
try:
casted = element.astype(dtype)
except (ValueError, TypeError):
raise LossySetitemError
except (ValueError, TypeError) as err:
raise LossySetitemError from err
# Check for cases of either
# a) lossy overflow/rounding or
# b) semantic changes like dt64->int64
Expand Down
6 changes: 4 additions & 2 deletions pandas/core/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -576,8 +576,10 @@ def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs):
def _get_axis_number(cls, axis: Axis) -> AxisInt:
try:
return cls._AXIS_TO_AXIS_NUMBER[axis]
except KeyError:
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
except KeyError as err:
raise ValueError(
f"No axis named {axis} for object type {cls.__name__}"
) from err

@final
@classmethod
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -3798,7 +3798,7 @@ def get_loc(self, key):
isinstance(casted_key, abc.Iterable)
and any(isinstance(x, slice) for x in casted_key)
):
raise InvalidIndexError(key)
raise InvalidIndexError(key) from err
raise KeyError(key) from err
except TypeError:
# If we have a listlike key, _check_indexing_error will raise
Expand Down Expand Up @@ -5750,13 +5750,13 @@ def asof(self, label):
self._searchsorted_monotonic(label) # validate sortedness
try:
loc = self.get_loc(label)
except (KeyError, TypeError):
except (KeyError, TypeError) as err:
# KeyError -> No exact match, try for padded
# TypeError -> passed e.g. non-hashable, fall through to get
# the tested exception message
indexer = self.get_indexer([label], method="pad")
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError("asof requires scalar valued input")
raise TypeError("asof requires scalar valued input") from err
loc = indexer.item()
if loc == -1:
return self._na_value
Expand Down Expand Up @@ -6812,7 +6812,7 @@ def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
raise err from None

if isinstance(slc, np.ndarray):
# get_loc may return a boolean array, which
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/interchange/column.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,8 +182,8 @@ def describe_null(self):
kind = self.dtype[0]
try:
null, value = _NULL_DESCRIPTION[kind]
except KeyError:
raise NotImplementedError(f"Data type {kind} not yet supported")
except KeyError as err:
raise NotImplementedError(f"Data type {kind} not yet supported") from err

return null, value

Expand Down Expand Up @@ -341,9 +341,9 @@ def _get_validity_buffer(self) -> tuple[PandasBuffer, Any]:

try:
msg = f"{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask"
except KeyError:
except KeyError as err:
# TODO: implement for other bit/byte masks?
raise NotImplementedError("See self.describe_null")
raise NotImplementedError("See self.describe_null") from err

raise NoBufferPresent(msg)

Expand Down
4 changes: 2 additions & 2 deletions pandas/core/reshape/encoding.py
Original file line number Diff line number Diff line change
Expand Up @@ -493,8 +493,8 @@ def from_dummies(
# index data with a list of all columns that are dummies
try:
data_to_decode = data.astype("boolean", copy=False)
except TypeError:
raise TypeError("Passed DataFrame contains non-dummy data")
except TypeError as err:
raise TypeError("Passed DataFrame contains non-dummy data") from err

# collect prefixes and get lists to slice data for each prefix
variables_slice = defaultdict(list)
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -582,11 +582,11 @@ def ensure_key_mapped(
type_of_values = type(values)
# error: Too many arguments for "ExtensionArray"
result = type_of_values(result) # type: ignore[call-arg]
except TypeError:
except TypeError as err:
raise TypeError(
f"User-provided `key` function returned an invalid type {type(result)} \
which could not be converted to {type(values)}."
)
) from err

return result

Expand Down
4 changes: 2 additions & 2 deletions pandas/core/tools/datetimes.py
Original file line number Diff line number Diff line change
Expand Up @@ -510,14 +510,14 @@ def _to_datetime_with_unit(arg, unit, name, utc: bool, errors: str) -> Index:
with np.errstate(over="raise"):
try:
arr = cast_from_unit_vectorized(arg, unit=unit)
except OutOfBoundsDatetime:
except OutOfBoundsDatetime as err:
if errors != "raise":
return _to_datetime_with_unit(
arg.astype(object), unit, name, utc, errors
)
raise OutOfBoundsDatetime(
f"cannot convert input with unit '{unit}'"
)
) from err

arr = arr.view("M8[ns]")
tz_parsed = None
Expand Down
4 changes: 2 additions & 2 deletions pandas/io/formats/style_render.py
Original file line number Diff line number Diff line change
Expand Up @@ -1922,11 +1922,11 @@ def maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList:
for x in s
if x.strip() != ""
]
except IndexError:
except IndexError as err:
raise ValueError(
"Styles supplied as string must follow CSS rule formats, "
f"for example 'attr: val;'. '{style}' was given."
)
) from err
return style


Expand Down
20 changes: 12 additions & 8 deletions pandas/io/formats/xml.py
Original file line number Diff line number Diff line change
Expand Up @@ -293,8 +293,8 @@ def _build_attribs(self, d: dict[str, Any], elem_row: Any) -> Any:
try:
if not isna(d[col]):
elem_row.attrib[attr_name] = str(d[col])
except KeyError:
raise KeyError(f"no valid column, {col}")
except KeyError as err:
raise KeyError(f"no valid column, {col}") from err
return elem_row

@final
Expand Down Expand Up @@ -330,8 +330,8 @@ def _build_elems(self, d: dict[str, Any], elem_row: Any) -> None:
try:
val = None if isna(d[col]) or d[col] == "" else str(d[col])
sub_element_cls(elem_row, elem_name).text = val
except KeyError:
raise KeyError(f"no valid column, {col}")
except KeyError as err:
raise KeyError(f"no valid column, {col}") from err

@final
def write_output(self) -> str | None:
Expand Down Expand Up @@ -408,8 +408,10 @@ def _get_prefix_uri(self) -> str:
if self.prefix:
try:
uri = f"{{{self.namespaces[self.prefix]}}}"
except KeyError:
raise KeyError(f"{self.prefix} is not included in namespaces")
except KeyError as err:
raise KeyError(
f"{self.prefix} is not included in namespaces"
) from err
elif "" in self.namespaces:
uri = f'{{{self.namespaces[""]}}}'
else:
Expand Down Expand Up @@ -504,8 +506,10 @@ def _get_prefix_uri(self) -> str:
if self.prefix:
try:
uri = f"{{{self.namespaces[self.prefix]}}}"
except KeyError:
raise KeyError(f"{self.prefix} is not included in namespaces")
except KeyError as err:
raise KeyError(
f"{self.prefix} is not included in namespaces"
) from err
elif "" in self.namespaces:
uri = f'{{{self.namespaces[""]}}}'
else:
Expand Down
8 changes: 4 additions & 4 deletions pandas/io/parsers/arrow_parser_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,9 +214,9 @@ def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
self.dtype = pandas_dtype(self.dtype)
try:
frame = frame.astype(self.dtype)
except TypeError as e:
except TypeError as err:
# GH#44901 reraise to keep api consistent
raise ValueError(e)
raise ValueError(str(err)) from err
return frame

def _validate_usecols(self, usecols) -> None:
Expand Down Expand Up @@ -247,7 +247,7 @@ def read(self) -> DataFrame:

try:
convert_options = pyarrow_csv.ConvertOptions(**self.convert_options)
except TypeError:
except TypeError as err:
include = self.convert_options.get("include_columns", None)
if include is not None:
self._validate_usecols(include)
Expand All @@ -258,7 +258,7 @@ def read(self) -> DataFrame:
):
raise TypeError(
"The 'pyarrow' engine requires all na_values to be strings"
)
) from err

raise

Expand Down
4 changes: 2 additions & 2 deletions pandas/io/parsers/python_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -733,8 +733,8 @@ def _next_line(self) -> list[Scalar]:
if ret:
line = ret[0]
break
except IndexError:
raise StopIteration
except IndexError as err:
raise StopIteration from err
else:
while self.skipfunc(self.pos):
self.pos += 1
Expand Down
8 changes: 4 additions & 4 deletions pandas/io/xml.py
Original file line number Diff line number Diff line change
Expand Up @@ -484,12 +484,12 @@ def _validate_path(self) -> list[Any]:
if children == [] and attrs == {}:
raise ValueError(msg)

except (KeyError, SyntaxError):
except (KeyError, SyntaxError) as err:
raise SyntaxError(
"You have used an incorrect or unsupported XPath "
"expression for etree library or you used an "
"undeclared namespace prefix."
)
) from err

return elems

Expand Down Expand Up @@ -746,12 +746,12 @@ class that build Data Frame and infers specific dtypes.
try:
with TextParser(nodes, names=tags, **kwargs) as tp:
return tp.read()
except ParserError:
except ParserError as err:
raise ParserError(
"XML document may be too complex for import. "
"Try to flatten document and use distinct "
"element and attribute names."
)
) from err


def _parse(
Expand Down
8 changes: 4 additions & 4 deletions pandas/tests/indexes/datetimes/test_constructors.py
Original file line number Diff line number Diff line change
Expand Up @@ -1045,12 +1045,12 @@ def test_dti_constructor_with_non_nano_now_today(self):
tolerance = pd.Timedelta(microseconds=1)

diff0 = result[0] - now.as_unit("s")
assert diff0 >= pd.Timedelta(0)
assert diff0 < tolerance
assert diff0 >= pd.Timedelta(0), f"The difference is {diff0}"
assert diff0 < tolerance, f"The difference is {diff0}"

diff1 = result[1] - today.as_unit("s")
assert diff1 >= pd.Timedelta(0)
assert diff1 < tolerance
assert diff1 >= pd.Timedelta(0), f"The difference is {diff1}"
assert diff1 < tolerance, f"The difference is {diff1}"

def test_dti_constructor_object_float_matches_float_dtype(self):
# GH#55780
Expand Down
Loading