From 4fac404d6e74cb4bee738e5ef30631c69962a208 Mon Sep 17 00:00:00 2001 From: Will Ayd Date: Wed, 23 Nov 2022 13:03:46 -0800 Subject: [PATCH 1/5] removed inline class functions --- pandas/_libs/hashtable.pxd | 2 +- pandas/_libs/hashtable_class_helper.pxi.in | 6 +-- pandas/_libs/index.pyx | 10 ++-- pandas/_libs/lib.pyx | 54 +++++++++++----------- pandas/_libs/parsers.pyx | 2 +- pandas/_libs/tslibs/timedeltas.pxd | 2 +- pandas/_libs/tslibs/timedeltas.pyx | 2 +- pandas/_libs/tslibs/timestamps.pyx | 2 +- pandas/_libs/tslibs/tzconversion.pxd | 2 +- pandas/_libs/tslibs/tzconversion.pyx | 2 +- 10 files changed, 42 insertions(+), 42 deletions(-) diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd index b32bd4880588d..6f66884ac8206 100644 --- a/pandas/_libs/hashtable.pxd +++ b/pandas/_libs/hashtable.pxd @@ -185,5 +185,5 @@ cdef class Int64Vector(Vector): cdef resize(self) cpdef ndarray to_array(self) - cdef inline void append(self, int64_t x) + cdef void append(self, int64_t x) cdef extend(self, int64_t[:] x) diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 47dd0cbbd7164..6f51e317de57e 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -241,7 +241,7 @@ cdef class {{name}}Vector(Vector): self.external_view_exists = True return self.ao - cdef inline void append(self, {{c_type}} x): + cdef void append(self, {{c_type}} x): if needs_resize(self.data): if self.external_view_exists: @@ -311,7 +311,7 @@ cdef class StringVector(Vector): self.data.m = self.data.n return ao - cdef inline void append(self, char *x): + cdef void append(self, char *x): if needs_resize(self.data): self.resize() @@ -339,7 +339,7 @@ cdef class ObjectVector(Vector): def __len__(self) -> int: return self.n - cdef inline append(self, object obj): + cdef append(self, object obj): if self.n == self.m: if self.external_view_exists: raise ValueError("external reference but " diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 27edc83c6f329..ec54400bd4a33 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -176,7 +176,7 @@ cdef class IndexEngine: loc = self.values.searchsorted(self._np_type(val), side="left") return loc - cdef inline _get_loc_duplicates(self, object val): + cdef _get_loc_duplicates(self, object val): # -> Py_ssize_t | slice | ndarray[bool] cdef: Py_ssize_t diff, left, right @@ -225,7 +225,7 @@ cdef class IndexEngine: return self.unique == 1 - cdef inline _do_unique_check(self): + cdef _do_unique_check(self): # this de-facto the same self._ensure_mapping_populated() @@ -244,7 +244,7 @@ cdef class IndexEngine: return self.monotonic_dec == 1 - cdef inline _do_monotonic_check(self): + cdef _do_monotonic_check(self): cdef: bint is_unique try: @@ -277,7 +277,7 @@ cdef class IndexEngine: def is_mapping_populated(self) -> bool: return self.mapping is not None - cdef inline _ensure_mapping_populated(self): + cdef _ensure_mapping_populated(self): # this populates the mapping # if its not already populated # also satisfies the need_unique_check @@ -932,7 +932,7 @@ cdef class SharedEngine: return self._get_loc_duplicates(val) - cdef inline _get_loc_duplicates(self, object val): + cdef _get_loc_duplicates(self, object val): # -> Py_ssize_t | slice | ndarray[bool] cdef: Py_ssize_t diff diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index b4c38df021484..de11820d45af4 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1206,7 +1206,7 @@ cdef class Seen: 
self.interval_ = False self.coerce_numeric = coerce_numeric - cdef inline bint check_uint64_conflict(self) except -1: + cdef bint check_uint64_conflict(self) except -1: """ Check whether we can safely convert a uint64 array to a numeric dtype. @@ -1240,7 +1240,7 @@ cdef class Seen: return (self.uint_ and (self.null_ or self.sint_) and not self.coerce_numeric) - cdef inline saw_null(self): + cdef saw_null(self): """ Set flags indicating that a null value was encountered. """ @@ -1655,10 +1655,10 @@ cdef class Validator: @cython.internal cdef class BoolValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return util.is_bool_object(value) - cdef inline bint is_array_typed(self) except -1: + cdef bint is_array_typed(self) except -1: return issubclass(self.dtype.type, np.bool_) @@ -1672,10 +1672,10 @@ cpdef bint is_bool_array(ndarray values, bint skipna=False): @cython.internal cdef class IntegerValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return util.is_integer_object(value) - cdef inline bint is_array_typed(self) except -1: + cdef bint is_array_typed(self) except -1: return issubclass(self.dtype.type, np.integer) @@ -1690,7 +1690,7 @@ cpdef bint is_integer_array(ndarray values, bint skipna=True): @cython.internal cdef class IntegerNaValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return (util.is_integer_object(value) or (util.is_nan(value) and util.is_float_object(value))) @@ -1704,10 +1704,10 @@ cdef bint is_integer_na_array(ndarray values, bint skipna=True): @cython.internal cdef class IntegerFloatValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return util.is_integer_object(value) or util.is_float_object(value) - cdef inline bint is_array_typed(self) except -1: + cdef bint is_array_typed(self) except -1: return issubclass(self.dtype.type, np.integer) @@ -1721,10 +1721,10 @@ cdef bint is_integer_float_array(ndarray values, bint skipna=True): @cython.internal cdef class FloatValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return util.is_float_object(value) - cdef inline bint is_array_typed(self) except -1: + cdef bint is_array_typed(self) except -1: return issubclass(self.dtype.type, np.floating) @@ -1737,13 +1737,13 @@ cpdef bint is_float_array(ndarray values): @cython.internal cdef class ComplexValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return ( util.is_complex_object(value) or (util.is_float_object(value) and is_nan(value)) ) - cdef inline bint is_array_typed(self) except -1: + cdef bint is_array_typed(self) except -1: return issubclass(self.dtype.type, np.complexfloating) @@ -1755,7 +1755,7 @@ cdef bint is_complex_array(ndarray values): @cython.internal cdef class DecimalValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return is_decimal(value) @@ -1769,10 +1769,10 @@ cdef bint is_decimal_array(ndarray values, bint skipna=False): @cython.internal cdef class StringValidator(Validator): - cdef inline bint 
is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return isinstance(value, str) - cdef inline bint is_array_typed(self) except -1: + cdef bint is_array_typed(self) except -1: return issubclass(self.dtype.type, np.str_) @@ -1786,10 +1786,10 @@ cpdef bint is_string_array(ndarray values, bint skipna=False): @cython.internal cdef class BytesValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return isinstance(value, bytes) - cdef inline bint is_array_typed(self) except -1: + cdef bint is_array_typed(self) except -1: return issubclass(self.dtype.type, np.bytes_) @@ -1812,14 +1812,14 @@ cdef class TemporalValidator(Validator): self.skipna = skipna self.all_generic_na = True - cdef inline bint is_valid(self, object value) except -1: + cdef bint is_valid(self, object value) except -1: return self.is_value_typed(value) or self.is_valid_null(value) cdef bint is_valid_null(self, object value) except -1: raise NotImplementedError(f"{type(self).__name__} child class " "must define is_valid_null") - cdef inline bint is_valid_skipna(self, object value) except -1: + cdef bint is_valid_skipna(self, object value) except -1: cdef: bint is_typed_null = self.is_valid_null(value) bint is_generic_null = value is None or util.is_nan(value) @@ -1840,7 +1840,7 @@ cdef class DatetimeValidator(TemporalValidator): cdef bint is_value_typed(self, object value) except -1: return PyDateTime_Check(value) - cdef inline bint is_valid_null(self, object value) except -1: + cdef bint is_valid_null(self, object value) except -1: return is_null_datetime64(value) @@ -1853,7 +1853,7 @@ cpdef bint is_datetime_array(ndarray values, bint skipna=True): @cython.internal cdef class Datetime64Validator(DatetimeValidator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return util.is_datetime64_object(value) @@ -1867,7 +1867,7 @@ cpdef bint is_datetime64_array(ndarray values, bint skipna=True): @cython.internal cdef class AnyDatetimeValidator(DatetimeValidator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return util.is_datetime64_object(value) or ( PyDateTime_Check(value) and value.tzinfo is None ) @@ -1919,13 +1919,13 @@ cdef class TimedeltaValidator(TemporalValidator): cdef bint is_value_typed(self, object value) except -1: return PyDelta_Check(value) - cdef inline bint is_valid_null(self, object value) except -1: + cdef bint is_valid_null(self, object value) except -1: return is_null_timedelta64(value) @cython.internal cdef class AnyTimedeltaValidator(TimedeltaValidator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return is_timedelta(value) @@ -1942,7 +1942,7 @@ cpdef bint is_timedelta_or_timedelta64_array(ndarray values, bint skipna=True): @cython.internal cdef class DateValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return PyDate_Check(value) @@ -1955,7 +1955,7 @@ cpdef bint is_date_array(ndarray values, bint skipna=False): @cython.internal cdef class TimeValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: + cdef bint is_value_typed(self, object value) except -1: return PyTime_Check(value) diff --git a/pandas/_libs/parsers.pyx 
b/pandas/_libs/parsers.pyx index a5b07d46bfeef..deb85fa9bdedd 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -1078,7 +1078,7 @@ cdef class TextReader: return results # -> tuple["ArrayLike", int]: - cdef inline _convert_tokens(self, Py_ssize_t i, int64_t start, + cdef _convert_tokens(self, Py_ssize_t i, int64_t start, int64_t end, object name, bint na_filter, kh_str_starts_t *na_hashset, object na_flist, object col_dtype): diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index e0313271a13df..3f37ef7eb1e3f 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -24,6 +24,6 @@ cdef class _Timedelta(timedelta): cdef bint _has_ns(self) cdef bint _is_in_pytimedelta_bounds(self) cdef _ensure_components(_Timedelta self) - cdef inline bint _compare_mismatched_resos(self, _Timedelta other, op) + cdef bint _compare_mismatched_resos(self, _Timedelta other, op) cdef _Timedelta _as_creso(self, NPY_DATETIMEUNIT reso, bint round_ok=*) cpdef _maybe_cast_to_matching_resos(self, _Timedelta other) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 5cc97a722b7a6..c1d7229bf8a54 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1151,7 +1151,7 @@ cdef class _Timedelta(timedelta): return self._compare_mismatched_resos(ots, op) # TODO: re-use/share with Timestamp - cdef inline bint _compare_mismatched_resos(self, _Timedelta other, op): + cdef bint _compare_mismatched_resos(self, _Timedelta other, op): # Can't just dispatch to numpy as they silently overflow and get it wrong cdef: npy_datetimestruct dts_self diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index b0208f9ca3296..b23e169ce4fd2 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -361,7 +361,7 @@ cdef class _Timestamp(ABCTimestamp): return self._compare_mismatched_resos(ots, op) # TODO: copied from Timedelta; try to de-duplicate - cdef inline bint _compare_mismatched_resos(self, _Timestamp other, int op): + cdef bint _compare_mismatched_resos(self, _Timestamp other, int op): # Can't just dispatch to numpy as they silently overflow and get it wrong cdef: npy_datetimestruct dts_self diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd index 1b95899e5c037..7c1dd04e2b2cc 100644 --- a/pandas/_libs/tslibs/tzconversion.pxd +++ b/pandas/_libs/tslibs/tzconversion.pxd @@ -31,7 +31,7 @@ cdef class Localizer: int64_t delta int64_t* tdata - cdef inline int64_t utc_val_to_local_val( + cdef int64_t utc_val_to_local_val( self, int64_t utc_val, Py_ssize_t* pos, diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 99855b36e8676..ba65b9d593057 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -116,7 +116,7 @@ cdef class Localizer: self.tdata = <int64_t*>cnp.PyArray_DATA(trans) @cython.boundscheck(False) - cdef inline int64_t utc_val_to_local_val( + cdef int64_t utc_val_to_local_val( self, int64_t utc_val, Py_ssize_t* pos, bint* fold=NULL ) except?
-1: if self.use_utc: From 1064e15ca82faeba8fe2b787d295199e5bb23992 Mon Sep 17 00:00:00 2001 From: Will Ayd Date: Mon, 28 Nov 2022 20:05:33 -0800 Subject: [PATCH 2/5] removed all cdef inline --- pandas/_libs/algos.pyx | 6 ++-- pandas/_libs/groupby.pyx | 8 ++--- pandas/_libs/hashing.pyx | 6 ++-- pandas/_libs/hashtable_class_helper.pxi.in | 4 +-- pandas/_libs/index.pyx | 2 +- pandas/_libs/lib.pyx | 6 ++-- pandas/_libs/missing.pyx | 6 ++-- pandas/_libs/parsers.pyx | 10 +++--- pandas/_libs/sparse_op_helper.pxi.in | 12 +++---- pandas/_libs/tslib.pyx | 2 +- pandas/_libs/tslibs/conversion.pyx | 10 +++--- pandas/_libs/tslibs/fields.pyx | 10 +++--- pandas/_libs/tslibs/nattype.pyx | 6 ++-- pandas/_libs/tslibs/np_datetime.pyx | 18 +++++----- pandas/_libs/tslibs/offsets.pyx | 8 ++--- pandas/_libs/tslibs/parsing.pyx | 16 ++++----- pandas/_libs/tslibs/period.pyx | 24 ++++++------- pandas/_libs/tslibs/timedeltas.pyx | 12 +++---- pandas/_libs/tslibs/timestamps.pyx | 4 +-- pandas/_libs/tslibs/timezones.pyx | 12 +++---- pandas/_libs/tslibs/tzconversion.pyx | 4 +-- pandas/_libs/tslibs/util.pxd | 28 +++++++-------- pandas/_libs/tslibs/vectorized.pyx | 2 +- pandas/_libs/window/aggregations.pyx | 42 +++++++++++----------- pandas/io/sas/byteswap.pyx | 4 +-- pandas/io/sas/sas.pyx | 10 +++--- 26 files changed, 136 insertions(+), 136 deletions(-) diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 7b9fe6422544c..ca2b20bdb1d5c 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -70,7 +70,7 @@ tiebreakers = { } -cdef inline bint are_diff(object left, object right): +cdef bint are_diff(object left, object right): try: return fabs(left - right) > FP_ERR except TypeError: @@ -257,7 +257,7 @@ def groupsort_indexer(const intp_t[:] index, Py_ssize_t ngroups): return indexer.base, counts.base -cdef inline Py_ssize_t swap(numeric_t *a, numeric_t *b) nogil: +cdef Py_ssize_t swap(numeric_t *a, numeric_t *b) nogil: cdef: numeric_t t @@ -268,7 +268,7 @@ cdef inline Py_ssize_t swap(numeric_t *a, numeric_t *b) nogil: return 0 -cdef inline numeric_t kth_smallest_c(numeric_t* arr, Py_ssize_t k, Py_ssize_t n) nogil: +cdef numeric_t kth_smallest_c(numeric_t* arr, Py_ssize_t k, Py_ssize_t n) nogil: """ See kth_smallest.__doc__. 
The additional parameter n specifies the maximum number of elements considered in arr, needed for compatibility with usage diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index a351ad6e461f3..5b6b5fcfc9e74 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -60,7 +60,7 @@ cdef enum InterpolationEnumType: INTERPOLATION_MIDPOINT -cdef inline float64_t median_linear_mask(float64_t* a, int n, uint8_t* mask) nogil: +cdef float64_t median_linear_mask(float64_t* a, int n, uint8_t* mask) nogil: cdef: int i, j, na_count = 0 float64_t* tmp @@ -97,7 +97,7 @@ cdef inline float64_t median_linear_mask(float64_t* a, int n, uint8_t* mask) nog return result -cdef inline float64_t median_linear(float64_t* a, int n) nogil: +cdef float64_t median_linear(float64_t* a, int n) nogil: cdef: int i, j, na_count = 0 float64_t* tmp @@ -134,7 +134,7 @@ cdef inline float64_t median_linear(float64_t* a, int n) nogil: return result -cdef inline float64_t calc_median_linear(float64_t* a, int n, int na_count) nogil: +cdef float64_t calc_median_linear(float64_t* a, int n, int na_count) nogil: cdef: float64_t result @@ -1231,7 +1231,7 @@ def group_quantile( # group_nth, group_last, group_rank # ---------------------------------------------------------------------- -cdef inline bint _treat_as_na(numeric_object_t val, bint is_datetimelike) nogil: +cdef bint _treat_as_na(numeric_object_t val, bint is_datetimelike) nogil: if numeric_object_t is object: # Should never be used, but we need to avoid the `val != val` below # or else cython will raise about gil acquisition. diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index 64f753f13a624..b4c2483e2d460 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -110,11 +110,11 @@ def hash_object_array( return result.base # .base to retrieve underlying np.ndarray -cdef inline uint64_t _rotl(uint64_t x, uint64_t b) nogil: +cdef uint64_t _rotl(uint64_t x, uint64_t b) nogil: return (x << b) | (x >> (64 - b)) -cdef inline uint64_t u8to64_le(uint8_t* p) nogil: +cdef uint64_t u8to64_le(uint8_t* p) nogil: return (<uint64_t>p[0] | <uint64_t>p[1] << 8 | <uint64_t>p[2] << 16 | @@ -125,7 +125,7 @@ cdef inline uint64_t u8to64_le(uint8_t* p) nogil: <uint64_t>p[7] << 56) -cdef inline void _sipround(uint64_t* v0, uint64_t* v1, +cdef void _sipround(uint64_t* v0, uint64_t* v1, uint64_t* v2, uint64_t* v3) nogil: v0[0] += v1[0] v1[0] = _rotl(v1[0], 13) diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 6f51e317de57e..06ad614b4f963 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -140,7 +140,7 @@ ctypedef struct {{name}}VectorData: @cython.wraparound(False) @cython.boundscheck(False) -cdef inline void append_data_{{dtype}}({{name}}VectorData *data, +cdef void append_data_{{dtype}}({{name}}VectorData *data, {{c_type}} x) nogil: data.data[data.n] = x @@ -163,7 +163,7 @@ ctypedef fused vector_data: Complex64VectorData StringVectorData -cdef inline bint needs_resize(vector_data *data) nogil: +cdef bint needs_resize(vector_data *data) nogil: return data.n == data.m # ---------------------------------------------------------------------- diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index ec54400bd4a33..13e870de04dcf 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -40,7 +40,7 @@ from pandas._libs.missing cimport ( multiindex_nulls_shift = 2 -cdef inline bint is_definitely_invalid_key(object val): +cdef bint
is_definitely_invalid_key(object val): try: hash(val) except TypeError: diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index de11820d45af4..4758806608d0c 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -231,7 +231,7 @@ def is_scalar(val: object) -> bool: or is_offset_object(val)) -cdef inline int64_t get_itemsize(object val): +cdef int64_t get_itemsize(object val): """ Get the itemsize of a NumPy scalar, -1 if not a NumPy scalar. @@ -1083,7 +1083,7 @@ def is_list_like(obj: object, allow_sets: bool = True) -> bool: return c_is_list_like(obj, allow_sets) -cdef inline bint c_is_list_like(object obj, bint allow_sets) except -1: +cdef bint c_is_list_like(object obj, bint allow_sets) except -1: # first, performance short-cuts for the most common cases if util.is_array(obj): # exclude zero-dimensional numpy arrays, effectively scalars @@ -1567,7 +1567,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str: return "mixed" -cdef inline bint is_timedelta(object o): +cdef bint is_timedelta(object o): return PyDelta_Check(o) or util.is_timedelta64_object(o) diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index b32061fbca0e2..a3b0451381ad2 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -167,7 +167,7 @@ cpdef bint checknull(object val, bint inf_as_na=False): return is_decimal_na(val) -cdef inline bint is_decimal_na(object val): +cdef bint is_decimal_na(object val): """ Is this a decimal.Decimal object Decimal("NAN"). """ @@ -258,7 +258,7 @@ def isneginf_scalar(val: object) -> bool: return util.is_float_object(val) and val == NEGINF -cdef inline bint is_null_datetime64(v): +cdef bint is_null_datetime64(v): # determine if we have a null for a datetime (or integer versions), # excluding np.timedelta64('nat') if checknull_with_nat(v) or is_dt64nat(v): @@ -266,7 +266,7 @@ cdef inline bint is_null_datetime64(v): return False -cdef inline bint is_null_timedelta64(v): +cdef bint is_null_timedelta64(v): # determine if we have a null for a timedelta (or integer versions), # excluding np.datetime64('nat') if checknull_with_nat(v) or is_td64nat(v): diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index deb85fa9bdedd..ca43587127b03 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -1585,7 +1585,7 @@ cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start, return result -cdef inline void _to_fw_string_nogil(parser_t *parser, int64_t col, +cdef void _to_fw_string_nogil(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, size_t width, char *data) nogil: cdef: @@ -1640,7 +1640,7 @@ cdef _try_double(parser_t *parser, int64_t col, return result, na_count -cdef inline int _try_double_nogil(parser_t *parser, +cdef int _try_double_nogil(parser_t *parser, float64_t (*double_converter)( const char *, char **, char, char, char, int, int *, int *) nogil, @@ -1749,7 +1749,7 @@ cdef _try_uint64(parser_t *parser, int64_t col, return result -cdef inline int _try_uint64_nogil(parser_t *parser, int64_t col, +cdef int _try_uint64_nogil(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, const kh_str_starts_t *na_hashset, @@ -1813,7 +1813,7 @@ cdef _try_int64(parser_t *parser, int64_t col, return result, na_count -cdef inline int _try_int64_nogil(parser_t *parser, int64_t col, +cdef int _try_int64_nogil(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, const kh_str_starts_t *na_hashset, int64_t NA, @@ -1876,7 +1876,7 @@ cdef 
_try_bool_flex(parser_t *parser, int64_t col, return result.view(np.bool_), na_count -cdef inline int _try_bool_flex_nogil(parser_t *parser, int64_t col, +cdef int _try_bool_flex_nogil(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, const kh_str_starts_t *na_hashset, diff --git a/pandas/_libs/sparse_op_helper.pxi.in b/pandas/_libs/sparse_op_helper.pxi.in index e6a2c7b1b050a..0e310e91fab74 100644 --- a/pandas/_libs/sparse_op_helper.pxi.in +++ b/pandas/_libs/sparse_op_helper.pxi.in @@ -13,7 +13,7 @@ ctypedef fused sparse_t: int64_t -cdef inline float64_t __div__(sparse_t a, sparse_t b): +cdef float64_t __div__(sparse_t a, sparse_t b): if b == 0: if a > 0: return INF @@ -25,11 +25,11 @@ cdef inline float64_t __div__(sparse_t a, sparse_t b): return float(a) / b -cdef inline float64_t __truediv__(sparse_t a, sparse_t b): +cdef float64_t __truediv__(sparse_t a, sparse_t b): return __div__(a, b) -cdef inline sparse_t __mod__(sparse_t a, sparse_t b): +cdef sparse_t __mod__(sparse_t a, sparse_t b): if b == 0: if sparse_t is float64_t: return NaN @@ -39,7 +39,7 @@ cdef inline sparse_t __mod__(sparse_t a, sparse_t b): return a % b -cdef inline sparse_t __floordiv__(sparse_t a, sparse_t b): +cdef sparse_t __floordiv__(sparse_t a, sparse_t b): if b == 0: if sparse_t is float64_t: # Match non-sparse Series behavior implemented in mask_zero_div_zero @@ -131,7 +131,7 @@ def get_dispatch(dtypes): @cython.wraparound(False) @cython.boundscheck(False) -cdef inline tuple block_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_, +cdef tuple block_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_, BlockIndex xindex, {{dtype}}_t xfill, {{dtype}}_t[:] y_, @@ -232,7 +232,7 @@ cdef inline tuple block_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_, @cython.wraparound(False) @cython.boundscheck(False) -cdef inline tuple int_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_, +cdef tuple int_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_, IntIndex xindex, {{dtype}}_t xfill, {{dtype}}_t[:] y_, diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index e01de6b70470e..34ff5a3b7dd19 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -856,7 +856,7 @@ cdef _array_to_datetime_object( return oresult, None -cdef inline bint _parse_today_now(str val, int64_t* iresult, bint utc): +cdef bint _parse_today_now(str val, int64_t* iresult, bint utc): # We delay this check for as long as possible # because it catches relatively rare cases if val == "now": diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 923dfa3c54d26..8d3d648231fb6 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -80,7 +80,7 @@ TD64NS_DTYPE = np.dtype('m8[ns]') # ---------------------------------------------------------------------- # Unit Conversion Helpers -cdef inline int64_t cast_from_unit(object ts, str unit) except? -1: +cdef int64_t cast_from_unit(object ts, str unit) except? -1: """ Return a casting of the unit represented to nanoseconds round the fractional part of a float to our precision, p. @@ -167,7 +167,7 @@ cpdef inline (int64_t, int) precision_from_unit(str unit): return m, p -cdef inline int64_t get_datetime64_nanos(object val, NPY_DATETIMEUNIT reso) except? -1: +cdef int64_t get_datetime64_nanos(object val, NPY_DATETIMEUNIT reso) except? -1: """ Extract the value and unit from a np.datetime64 object, then convert the value to nanoseconds if necessary. 
@@ -524,7 +524,7 @@ cdef _TSObject _convert_str_to_tsobject(object ts, tzinfo tz, str unit, return convert_datetime_to_tsobject(dt, tz) -cdef inline check_overflows(_TSObject obj, NPY_DATETIMEUNIT reso=NPY_FR_ns): +cdef check_overflows(_TSObject obj, NPY_DATETIMEUNIT reso=NPY_FR_ns): """ Check that we haven't silently overflowed in timezone conversion @@ -567,7 +567,7 @@ cdef inline check_overflows(_TSObject obj, NPY_DATETIMEUNIT reso=NPY_FR_ns): # ---------------------------------------------------------------------- # Localization -cdef inline void _localize_tso(_TSObject obj, tzinfo tz, NPY_DATETIMEUNIT reso): +cdef void _localize_tso(_TSObject obj, tzinfo tz, NPY_DATETIMEUNIT reso): """ Given the UTC nanosecond timestamp in obj.value, find the wall-clock representation of that timestamp in the given timezone. @@ -609,7 +609,7 @@ cdef inline void _localize_tso(_TSObject obj, tzinfo tz, NPY_DATETIMEUNIT reso): obj.tzinfo = tz -cdef inline datetime _localize_pydatetime(datetime dt, tzinfo tz): +cdef datetime _localize_pydatetime(datetime dt, tzinfo tz): """ Take a datetime/Timestamp in UTC and localizes to timezone tz. diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index dda26ad3bebc6..1cc7fa2e9b67a 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -189,7 +189,7 @@ def get_date_name_field( return out -cdef inline bint _is_on_month(int month, int compare_month, int modby) nogil: +cdef bint _is_on_month(int month, int compare_month, int modby) nogil: """ Analogous to DateOffset.is_on_offset checking for the month part of a date. """ @@ -682,7 +682,7 @@ class RoundTo: return 4 -cdef inline ndarray[int64_t] _floor_int64(const int64_t[:] values, int64_t unit): +cdef ndarray[int64_t] _floor_int64(const int64_t[:] values, int64_t unit): cdef: Py_ssize_t i, n = len(values) ndarray[int64_t] result = np.empty(n, dtype="i8") @@ -700,7 +700,7 @@ cdef inline ndarray[int64_t] _floor_int64(const int64_t[:] values, int64_t unit) return result -cdef inline ndarray[int64_t] _ceil_int64(const int64_t[:] values, int64_t unit): +cdef ndarray[int64_t] _ceil_int64(const int64_t[:] values, int64_t unit): cdef: Py_ssize_t i, n = len(values) ndarray[int64_t] result = np.empty(n, dtype="i8") @@ -724,11 +724,11 @@ cdef inline ndarray[int64_t] _ceil_int64(const int64_t[:] values, int64_t unit): return result -cdef inline ndarray[int64_t] _rounddown_int64(values, int64_t unit): +cdef ndarray[int64_t] _rounddown_int64(values, int64_t unit): return _ceil_int64(values - unit // 2, unit) -cdef inline ndarray[int64_t] _roundup_int64(values, int64_t unit): +cdef ndarray[int64_t] _roundup_int64(values, int64_t unit): return _floor_int64(values + unit // 2, unit) diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index dcb7358d8e69a..2be85f2c54a7b 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -1218,14 +1218,14 @@ NaT = c_NaT # Python-visible # ---------------------------------------------------------------------- -cdef inline bint checknull_with_nat(object val): +cdef bint checknull_with_nat(object val): """ Utility to check if a value is a nat or not. """ return val is None or util.is_nan(val) or val is c_NaT -cdef inline bint is_dt64nat(object val): +cdef bint is_dt64nat(object val): """ Is this a np.datetime64 object np.datetime64("NaT"). 
""" @@ -1234,7 +1234,7 @@ cdef inline bint is_dt64nat(object val): return False -cdef inline bint is_td64nat(object val): +cdef bint is_td64nat(object val): """ Is this a np.timedelta64 object np.timedelta64("NaT"). """ diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index d49c41e54764f..057748f2d6fb4 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -59,7 +59,7 @@ cdef extern from "src/datetime/np_datetime_strings.h": # ---------------------------------------------------------------------- # numpy object inspection -cdef inline npy_datetime get_datetime64_value(object obj) nogil: +cdef npy_datetime get_datetime64_value(object obj) nogil: """ returns the int64 value underlying scalar numpy datetime64 object @@ -69,14 +69,14 @@ cdef inline npy_datetime get_datetime64_value(object obj) nogil: return (obj).obval -cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: +cdef npy_timedelta get_timedelta64_value(object obj) nogil: """ returns the int64 value underlying scalar numpy timedelta64 object """ return (obj).obval -cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: +cdef NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: """ returns the unit part of the dtype for a numpy datetime64 object. """ @@ -136,7 +136,7 @@ cdef bint cmp_dtstructs( return cmp_res == -1 or cmp_res == 0 -cdef inline bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1: +cdef bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1: """ cmp_scalar is a more performant version of PyObject_RichCompare typed for int64_t arguments. @@ -229,7 +229,7 @@ def py_td64_to_tdstruct(int64_t td64, NPY_DATETIMEUNIT unit): return tds # <- returned as a dict to python -cdef inline void pydatetime_to_dtstruct(datetime dt, npy_datetimestruct *dts): +cdef void pydatetime_to_dtstruct(datetime dt, npy_datetimestruct *dts): if PyDateTime_CheckExact(dt): dts.year = PyDateTime_GET_YEAR(dt) else: @@ -246,7 +246,7 @@ cdef inline void pydatetime_to_dtstruct(datetime dt, npy_datetimestruct *dts): dts.ps = dts.as = 0 -cdef inline int64_t pydatetime_to_dt64(datetime val, +cdef int64_t pydatetime_to_dt64(datetime val, npy_datetimestruct *dts, NPY_DATETIMEUNIT reso=NPY_FR_ns): """ @@ -256,7 +256,7 @@ cdef inline int64_t pydatetime_to_dt64(datetime val, return npy_datetimestruct_to_datetime(reso, dts) -cdef inline void pydate_to_dtstruct(date val, npy_datetimestruct *dts): +cdef void pydate_to_dtstruct(date val, npy_datetimestruct *dts): dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val) @@ -264,14 +264,14 @@ cdef inline void pydate_to_dtstruct(date val, npy_datetimestruct *dts): dts.ps = dts.as = 0 return -cdef inline int64_t pydate_to_dt64( +cdef int64_t pydate_to_dt64( date val, npy_datetimestruct *dts, NPY_DATETIMEUNIT reso=NPY_FR_ns ): pydate_to_dtstruct(val, dts) return npy_datetimestruct_to_datetime(reso, dts) -cdef inline int string_to_dts( +cdef int string_to_dts( str val, npy_datetimestruct* dts, NPY_DATETIMEUNIT* out_bestunit, diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 8e022ac662d21..30a30d6b65bc2 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3963,14 +3963,14 @@ cdef datetime _shift_day(datetime other, int days): return localize_pydatetime(shifted, tz) -cdef inline int year_add_months(npy_datetimestruct dts, int months) nogil: +cdef int year_add_months(npy_datetimestruct dts, 
int months) nogil: """ New year number after shifting npy_datetimestruct number of months. """ return dts.year + (dts.month + months - 1) // 12 -cdef inline int month_add_months(npy_datetimestruct dts, int months) nogil: +cdef int month_add_months(npy_datetimestruct dts, int months) nogil: """ New month number after shifting npy_datetimestruct number of months. @@ -4264,7 +4264,7 @@ def shift_month(stamp: datetime, months: int, day_opt: object = None) -> datetim return stamp.replace(year=year, month=month, day=day) -cdef inline int get_day_of_month(npy_datetimestruct* dts, str day_opt) nogil: +cdef int get_day_of_month(npy_datetimestruct* dts, str day_opt) nogil: """ Find the day in `other`'s month that satisfies a DateOffset's is_on_offset policy, as described by the `day_opt` argument. @@ -4375,7 +4375,7 @@ def roll_qtrday(other: datetime, n: int, month: int, return _roll_qtrday(&dts, n, months_since, day_opt) -cdef inline int _roll_qtrday(npy_datetimestruct* dts, +cdef int _roll_qtrday(npy_datetimestruct* dts, int n, int months_since, str day_opt) nogil except? -1: diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 232169f3844b3..e567fab93eebe 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -102,24 +102,24 @@ cdef: int MAX_DAYS_IN_MONTH = 31, MAX_MONTH = 12 -cdef inline bint _is_delimiter(const char ch): +cdef bint _is_delimiter(const char ch): return strchr(delimiters, ch) != NULL -cdef inline int _parse_1digit(const char* s): +cdef int _parse_1digit(const char* s): cdef int result = 0 result += getdigit_ascii(s[0], -10) * 1 return result -cdef inline int _parse_2digit(const char* s): +cdef int _parse_2digit(const char* s): cdef int result = 0 result += getdigit_ascii(s[0], -10) * 10 result += getdigit_ascii(s[1], -100) * 1 return result -cdef inline int _parse_4digit(const char* s): +cdef int _parse_4digit(const char* s): cdef int result = 0 result += getdigit_ascii(s[0], -10) * 1000 result += getdigit_ascii(s[1], -100) * 100 @@ -128,7 +128,7 @@ cdef inline int _parse_4digit(const char* s): return result -cdef inline object _parse_delimited_date(str date_string, bint dayfirst): +cdef object _parse_delimited_date(str date_string, bint dayfirst): """ Parse special cases of dates: MM/DD/YYYY, DD/MM/YYYY, MM/YYYY. @@ -234,7 +234,7 @@ cdef inline object _parse_delimited_date(str date_string, bint dayfirst): raise DateParseError(f"Invalid date specified ({month}/{day})") -cdef inline bint does_string_look_like_time(str parse_string): +cdef bint does_string_look_like_time(str parse_string): """ Checks whether given string is a time: it has to start either from H:MM or from HH:MM, and hour and minute values must be valid. @@ -500,7 +500,7 @@ cpdef bint _does_string_look_like_datetime(str py_string): return True -cdef inline object _parse_dateabbr_string(object date_string, datetime default, +cdef object _parse_dateabbr_string(object date_string, datetime default, str freq=None): cdef: object ret @@ -1074,7 +1074,7 @@ cdef str _fill_token(token: str, padding: int): @cython.wraparound(False) @cython.boundscheck(False) -cdef inline object convert_to_unicode(object item, bint keep_trivial_numbers): +cdef object convert_to_unicode(object item, bint keep_trivial_numbers): """ Convert `item` to str. 
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 0e7cfa4dd9670..44d504de2a005 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -311,18 +311,18 @@ cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back, return DtoB_weekday(unix_date) -cdef inline int64_t upsample_daytime(int64_t ordinal, asfreq_info *af_info) nogil: +cdef int64_t upsample_daytime(int64_t ordinal, asfreq_info *af_info) nogil: if af_info.is_end: return (ordinal + 1) * af_info.intraday_conversion_factor - 1 else: return ordinal * af_info.intraday_conversion_factor -cdef inline int64_t downsample_daytime(int64_t ordinal, asfreq_info *af_info) nogil: +cdef int64_t downsample_daytime(int64_t ordinal, asfreq_info *af_info) nogil: return ordinal // af_info.intraday_conversion_factor -cdef inline int64_t transform_via_day(int64_t ordinal, +cdef int64_t transform_via_day(int64_t ordinal, asfreq_info *af_info, freq_conv_func first_func, freq_conv_func second_func) nogil: @@ -677,12 +677,12 @@ cdef char* c_strftime(npy_datetimestruct *dts, char *fmt): # ---------------------------------------------------------------------- # Conversion between date_info and npy_datetimestruct -cdef inline int get_freq_group(int freq) nogil: +cdef int get_freq_group(int freq) nogil: # See also FreqGroup.get_freq_group return (freq // 1000) * 1000 -cdef inline int get_freq_group_index(int freq) nogil: +cdef int get_freq_group_index(int freq) nogil: return freq // 1000 @@ -721,12 +721,12 @@ cdef int64_t unix_date_from_ymd(int year, int month, int day) nogil: return unix_date -cdef inline int64_t dts_to_month_ordinal(npy_datetimestruct* dts) nogil: +cdef int64_t dts_to_month_ordinal(npy_datetimestruct* dts) nogil: # AKA: use npy_datetimestruct_to_datetime(NPY_FR_M, &dts) return ((dts.year - 1970) * 12 + dts.month - 1) -cdef inline int64_t dts_to_year_ordinal(npy_datetimestruct *dts, int to_end) nogil: +cdef int64_t dts_to_year_ordinal(npy_datetimestruct *dts, int to_end) nogil: cdef: int64_t result @@ -737,7 +737,7 @@ cdef inline int64_t dts_to_year_ordinal(npy_datetimestruct *dts, int to_end) nog return result -cdef inline int64_t dts_to_qtr_ordinal(npy_datetimestruct* dts, int to_end) nogil: +cdef int64_t dts_to_qtr_ordinal(npy_datetimestruct* dts, int to_end) nogil: cdef: int quarter @@ -746,7 +746,7 @@ cdef inline int64_t dts_to_qtr_ordinal(npy_datetimestruct* dts, int to_end) nogi return ((dts.year - 1970) * 4 + quarter - 1) -cdef inline int get_anchor_month(int freq, int freq_group) nogil: +cdef int get_anchor_month(int freq, int freq_group) nogil: cdef: int fmonth fmonth = freq - freq_group @@ -930,7 +930,7 @@ cdef int get_yq(int64_t ordinal, int freq, npy_datetimestruct* dts): return quarter -cdef inline int month_to_quarter(int month) nogil: +cdef int month_to_quarter(int month) nogil: return (month - 1) // 3 + 1 @@ -1027,7 +1027,7 @@ cdef int calc_a_year_end(int freq, int group) nogil: return result -cdef inline int calc_week_end(int freq, int group) nogil: +cdef int calc_week_end(int freq, int group) nogil: return freq - group @@ -1465,7 +1465,7 @@ def extract_ordinals(ndarray values, freq) -> np.ndarray: return ordinals -cdef inline int64_t _extract_ordinal(object item, str freqstr, freq) except? -1: +cdef int64_t _extract_ordinal(object item, str freqstr, freq) except? -1: """ See extract_ordinals. 
""" diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index c1d7229bf8a54..8b27882d22b5e 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -455,7 +455,7 @@ def array_to_timedelta64( return result -cdef inline int64_t _item_to_timedelta64_fastpath(object item) except? -1: +cdef int64_t _item_to_timedelta64_fastpath(object item) except? -1: """ See array_to_timedelta64. """ @@ -467,7 +467,7 @@ cdef inline int64_t _item_to_timedelta64_fastpath(object item) except? -1: return parse_timedelta_string(item) -cdef inline int64_t _item_to_timedelta64( +cdef int64_t _item_to_timedelta64( object item, str parsed_unit, str errors @@ -488,7 +488,7 @@ cdef inline int64_t _item_to_timedelta64( raise -cdef inline int64_t parse_timedelta_string(str ts) except? -1: +cdef int64_t parse_timedelta_string(str ts) except? -1: """ Parse a regular format timedelta string. Return an int64_t (in ns) or raise a ValueError on an invalid parse. @@ -658,7 +658,7 @@ cdef inline int64_t parse_timedelta_string(str ts) except? -1: return result -cdef inline int64_t timedelta_as_neg(int64_t value, bint neg): +cdef int64_t timedelta_as_neg(int64_t value, bint neg): """ Parameters @@ -671,7 +671,7 @@ cdef inline int64_t timedelta_as_neg(int64_t value, bint neg): return value -cdef inline timedelta_from_spec(object number, object frac, object unit): +cdef timedelta_from_spec(object number, object frac, object unit): """ Parameters @@ -813,7 +813,7 @@ def _binary_op_method_timedeltalike(op, name): # ---------------------------------------------------------------------- # Timedelta Construction -cdef inline int64_t parse_iso_format_string(str ts) except? -1: +cdef int64_t parse_iso_format_string(str ts) except? -1: """ Extracts and cleanses the appropriate values from a match object with groups for each component of an ISO 8601 duration diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index b23e169ce4fd2..911746c39578b 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -126,7 +126,7 @@ _no_input = object() # ---------------------------------------------------------------------- -cdef inline _Timestamp create_timestamp_from_ts( +cdef _Timestamp create_timestamp_from_ts( int64_t value, npy_datetimestruct dts, tzinfo tz, @@ -2219,7 +2219,7 @@ Timestamp.daysinmonth = Timestamp.days_in_month @cython.cdivision(False) -cdef inline int64_t normalize_i8_stamp(int64_t local_val, int64_t ppd) nogil: +cdef int64_t normalize_i8_stamp(int64_t local_val, int64_t ppd) nogil: """ Round the localized nanosecond timestamp down to the previous midnight. 
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index abf8bbc5ca5b9..b3b6975aeca09 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -57,7 +57,7 @@ cdef tzinfo utc_zoneinfo = None # ---------------------------------------------------------------------- -cdef inline bint is_utc_zoneinfo(tzinfo tz): +cdef bint is_utc_zoneinfo(tzinfo tz): # Workaround for cases with missing tzdata # https://github.com/pandas-dev/pandas/pull/46425#discussion_r830633025 if tz is None or zoneinfo is None: @@ -86,22 +86,22 @@ cpdef inline bint is_utc(tzinfo tz): ) -cdef inline bint is_zoneinfo(tzinfo tz): +cdef bint is_zoneinfo(tzinfo tz): if ZoneInfo is None: return False return isinstance(tz, ZoneInfo) -cdef inline bint is_tzlocal(tzinfo tz): +cdef bint is_tzlocal(tzinfo tz): return isinstance(tz, _dateutil_tzlocal) -cdef inline bint treat_tz_as_pytz(tzinfo tz): +cdef bint treat_tz_as_pytz(tzinfo tz): return (hasattr(tz, '_utc_transition_times') and hasattr(tz, '_transition_info')) -cdef inline bint treat_tz_as_dateutil(tzinfo tz): +cdef bint treat_tz_as_dateutil(tzinfo tz): return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx') @@ -192,7 +192,7 @@ def _p_tz_cache_key(tz: tzinfo): dst_cache = {} -cdef inline object tz_cache_key(tzinfo tz): +cdef object tz_cache_key(tzinfo tz): """ Return the key in the cache for the timezone info object or None if unknown. diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index ba65b9d593057..429df9124da90 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -398,7 +398,7 @@ timedelta-like} return result.base # .base to get underlying ndarray -cdef inline Py_ssize_t bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n): +cdef Py_ssize_t bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n): # Caller is responsible for checking n > 0 # This looks very similar to local_search_right in the ndarray.searchsorted # implementation. 
@@ -427,7 +427,7 @@ cdef inline Py_ssize_t bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n) return left -cdef inline str _render_tstamp(int64_t val, NPY_DATETIMEUNIT creso): +cdef str _render_tstamp(int64_t val, NPY_DATETIMEUNIT creso): """ Helper function to render exception messages""" from pandas._libs.tslibs.timestamps import Timestamp ts = Timestamp._from_value_and_reso(val, creso, None) diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd index a28aace5d2f15..8f657183afcee 100644 --- a/pandas/_libs/tslibs/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -52,14 +52,14 @@ cdef extern from "numpy/npy_common.h": int64_t NPY_MIN_INT64 -cdef inline int64_t get_nat(): +cdef int64_t get_nat(): return NPY_MIN_INT64 # -------------------------------------------------------------------- # Type Checking -cdef inline bint is_integer_object(object obj) nogil: +cdef bint is_integer_object(object obj) nogil: """ Cython equivalent of @@ -81,7 +81,7 @@ cdef inline bint is_integer_object(object obj) nogil: and not is_timedelta64_object(obj)) -cdef inline bint is_float_object(object obj) nogil: +cdef bint is_float_object(object obj) nogil: """ Cython equivalent of `isinstance(val, (float, np.complex_))` @@ -97,7 +97,7 @@ cdef inline bint is_float_object(object obj) nogil: (PyObject_TypeCheck(obj, &PyFloatingArrType_Type))) -cdef inline bint is_complex_object(object obj) nogil: +cdef bint is_complex_object(object obj) nogil: """ Cython equivalent of `isinstance(val, (complex, np.complex_))` @@ -113,7 +113,7 @@ cdef inline bint is_complex_object(object obj) nogil: PyObject_TypeCheck(obj, &PyComplexFloatingArrType_Type)) -cdef inline bint is_bool_object(object obj) nogil: +cdef bint is_bool_object(object obj) nogil: """ Cython equivalent of `isinstance(val, (bool, np.bool_))` @@ -129,11 +129,11 @@ cdef inline bint is_bool_object(object obj) nogil: PyObject_TypeCheck(obj, &PyBoolArrType_Type)) -cdef inline bint is_real_number_object(object obj) nogil: +cdef bint is_real_number_object(object obj) nogil: return is_bool_object(obj) or is_integer_object(obj) or is_float_object(obj) -cdef inline bint is_timedelta64_object(object obj) nogil: +cdef bint is_timedelta64_object(object obj) nogil: """ Cython equivalent of `isinstance(val, np.timedelta64)` @@ -148,7 +148,7 @@ cdef inline bint is_timedelta64_object(object obj) nogil: return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) -cdef inline bint is_datetime64_object(object obj) nogil: +cdef bint is_datetime64_object(object obj) nogil: """ Cython equivalent of `isinstance(val, np.datetime64)` @@ -163,7 +163,7 @@ cdef inline bint is_datetime64_object(object obj) nogil: return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) -cdef inline bint is_array(object val): +cdef bint is_array(object val): """ Cython equivalent of `isinstance(val, np.ndarray)` @@ -178,7 +178,7 @@ cdef inline bint is_array(object val): return PyArray_Check(val) -cdef inline bint is_nan(object val): +cdef bint is_nan(object val): """ Check if val is a Not-A-Number float or complex, including float('NaN') and np.nan. 
@@ -198,7 +198,7 @@ cdef inline bint is_nan(object val): return is_complex_object(val) and val != val -cdef inline const char* get_c_string_buf_and_size(str py_string, +cdef const char* get_c_string_buf_and_size(str py_string, Py_ssize_t *length) except NULL: """ Extract internal char* buffer of unicode or bytes object `py_string` with @@ -221,15 +221,15 @@ cdef inline const char* get_c_string_buf_and_size(str py_string, return PyUnicode_AsUTF8AndSize(py_string, length) -cdef inline const char* get_c_string(str py_string) except NULL: +cdef const char* get_c_string(str py_string) except NULL: return get_c_string_buf_and_size(py_string, NULL) -cdef inline bytes string_encode_locale(str py_string): +cdef bytes string_encode_locale(str py_string): """As opposed to PyUnicode_Encode, use current system locale to encode.""" return PyUnicode_EncodeLocale(py_string, NULL) -cdef inline object char_to_string_locale(const char* data): +cdef object char_to_string_locale(const char* data): """As opposed to PyUnicode_FromString, use current system locale to decode.""" return PyUnicode_DecodeLocale(data, NULL) diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx index 4763ea2f6b748..06e09d890de69 100644 --- a/pandas/_libs/tslibs/vectorized.pyx +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -198,7 +198,7 @@ def ints_to_pydatetime( # ------------------------------------------------------------------------- -cdef inline c_Resolution _reso_stamp(npy_datetimestruct *dts): +cdef c_Resolution _reso_stamp(npy_datetimestruct *dts): if dts.ps != 0: return c_Resolution.RESO_NS elif dts.us != 0: diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 702706f00455b..07931022bf937 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -69,7 +69,7 @@ cdef bint is_monotonic_increasing_start_end_bounds( # Rolling sum -cdef inline float64_t calc_sum(int64_t minp, int64_t nobs, float64_t sum_x, +cdef float64_t calc_sum(int64_t minp, int64_t nobs, float64_t sum_x, int64_t num_consecutive_same_value, float64_t prev_value ) nogil: cdef: @@ -88,7 +88,7 @@ cdef inline float64_t calc_sum(int64_t minp, int64_t nobs, float64_t sum_x, return result -cdef inline void add_sum(float64_t val, int64_t *nobs, float64_t *sum_x, +cdef void add_sum(float64_t val, int64_t *nobs, float64_t *sum_x, float64_t *compensation, int64_t *num_consecutive_same_value, float64_t *prev_value) nogil: """ add a value from the sum calc using Kahan summation """ @@ -113,7 +113,7 @@ cdef inline void add_sum(float64_t val, int64_t *nobs, float64_t *sum_x, prev_value[0] = val -cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x, +cdef void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x, float64_t *compensation) nogil: """ remove a value from the sum calc using Kahan summation """ @@ -188,7 +188,7 @@ def roll_sum(const float64_t[:] values, ndarray[int64_t] start, # Rolling mean -cdef inline float64_t calc_mean(int64_t minp, Py_ssize_t nobs, Py_ssize_t neg_ct, +cdef float64_t calc_mean(int64_t minp, Py_ssize_t nobs, Py_ssize_t neg_ct, float64_t sum_x, int64_t num_consecutive_same_value, float64_t prev_value) nogil: cdef: @@ -211,7 +211,7 @@ cdef inline float64_t calc_mean(int64_t minp, Py_ssize_t nobs, Py_ssize_t neg_ct return result -cdef inline void add_mean( +cdef void add_mean( float64_t val, Py_ssize_t *nobs, float64_t *sum_x, @@ -243,7 +243,7 @@ cdef inline void add_mean( prev_value[0] = val -cdef inline void 
remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x, +cdef void remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x, Py_ssize_t *neg_ct, float64_t *compensation) nogil: """ remove a value from the mean calc using Kahan summation """ cdef: @@ -319,7 +319,7 @@ def roll_mean(const float64_t[:] values, ndarray[int64_t] start, # Rolling variance -cdef inline float64_t calc_var( +cdef float64_t calc_var( int64_t minp, int ddof, float64_t nobs, @@ -343,7 +343,7 @@ cdef inline float64_t calc_var( return result -cdef inline void add_var( +cdef void add_var( float64_t val, float64_t *nobs, float64_t *mean_x, @@ -385,7 +385,7 @@ cdef inline void add_var( ssqdm_x[0] = ssqdm_x[0] + (val - prev_mean) * (val - mean_x[0]) -cdef inline void remove_var( +cdef void remove_var( float64_t val, float64_t *nobs, float64_t *mean_x, @@ -480,7 +480,7 @@ def roll_var(const float64_t[:] values, ndarray[int64_t] start, # Rolling skewness -cdef inline float64_t calc_skew(int64_t minp, int64_t nobs, +cdef float64_t calc_skew(int64_t minp, int64_t nobs, float64_t x, float64_t xx, float64_t xxx, int64_t num_consecutive_same_value ) nogil: @@ -521,7 +521,7 @@ cdef inline float64_t calc_skew(int64_t minp, int64_t nobs, return result -cdef inline void add_skew(float64_t val, int64_t *nobs, +cdef void add_skew(float64_t val, int64_t *nobs, float64_t *x, float64_t *xx, float64_t *xxx, float64_t *compensation_x, @@ -560,7 +560,7 @@ cdef inline void add_skew(float64_t val, int64_t *nobs, prev_value[0] = val -cdef inline void remove_skew(float64_t val, int64_t *nobs, +cdef void remove_skew(float64_t val, int64_t *nobs, float64_t *x, float64_t *xx, float64_t *xxx, float64_t *compensation_x, @@ -678,7 +678,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start, # Rolling kurtosis -cdef inline float64_t calc_kurt(int64_t minp, int64_t nobs, +cdef float64_t calc_kurt(int64_t minp, int64_t nobs, float64_t x, float64_t xx, float64_t xxx, float64_t xxxx, int64_t num_consecutive_same_value, @@ -724,7 +724,7 @@ cdef inline float64_t calc_kurt(int64_t minp, int64_t nobs, return result -cdef inline void add_kurt(float64_t val, int64_t *nobs, +cdef void add_kurt(float64_t val, int64_t *nobs, float64_t *x, float64_t *xx, float64_t *xxx, float64_t *xxxx, float64_t *compensation_x, @@ -768,7 +768,7 @@ cdef inline void add_kurt(float64_t val, int64_t *nobs, prev_value[0] = val -cdef inline void remove_kurt(float64_t val, int64_t *nobs, +cdef void remove_kurt(float64_t val, int64_t *nobs, float64_t *x, float64_t *xx, float64_t *xxx, float64_t *xxxx, float64_t *compensation_x, @@ -993,7 +993,7 @@ def roll_median_c(const float64_t[:] values, ndarray[int64_t] start, # https://github.com/pydata/bottleneck -cdef inline numeric_t init_mm(numeric_t ai, Py_ssize_t *nobs, bint is_max) nogil: +cdef numeric_t init_mm(numeric_t ai, Py_ssize_t *nobs, bint is_max) nogil: if numeric_t in cython.floating: if ai == ai: @@ -1015,13 +1015,13 @@ cdef inline numeric_t init_mm(numeric_t ai, Py_ssize_t *nobs, bint is_max) nogil return ai -cdef inline void remove_mm(numeric_t aold, Py_ssize_t *nobs) nogil: +cdef void remove_mm(numeric_t aold, Py_ssize_t *nobs) nogil: """ remove a value from the mm calc """ if numeric_t in cython.floating and aold == aold: nobs[0] = nobs[0] - 1 -cdef inline numeric_t calc_mm(int64_t minp, Py_ssize_t nobs, +cdef numeric_t calc_mm(int64_t minp, Py_ssize_t nobs, numeric_t value) nogil: cdef: numeric_t result @@ -1531,7 +1531,7 @@ cdef float64_t[:] _roll_weighted_sum_mean(const float64_t[:] values, # Rolling 
var for weighted window -cdef inline float64_t calc_weighted_var(float64_t t, +cdef float64_t calc_weighted_var(float64_t t, float64_t sum_w, Py_ssize_t win_n, unsigned int ddof, @@ -1582,7 +1582,7 @@ cdef inline float64_t calc_weighted_var(float64_t t, return result -cdef inline void add_weighted_var(float64_t val, +cdef void add_weighted_var(float64_t val, float64_t w, float64_t *t, float64_t *sum_w, @@ -1628,7 +1628,7 @@ cdef inline void add_weighted_var(float64_t val, sum_w[0] = temp -cdef inline void remove_weighted_var(float64_t val, +cdef void remove_weighted_var(float64_t val, float64_t w, float64_t *t, float64_t *sum_w, diff --git a/pandas/io/sas/byteswap.pyx b/pandas/io/sas/byteswap.pyx index 2a4d3f66a5d7d..511af5140b563 100644 --- a/pandas/io/sas/byteswap.pyx +++ b/pandas/io/sas/byteswap.pyx @@ -81,13 +81,13 @@ cdef extern from *: uint64_t _byteswap8(uint64_t) -cdef inline float _byteswap_float(float num): +cdef float _byteswap_float(float num): cdef uint32_t *intptr = <uint32_t *>&num intptr[0] = _byteswap4(intptr[0]) return num -cdef inline double _byteswap_double(double num): +cdef double _byteswap_double(double num): cdef uint64_t *intptr = <uint64_t *>&num intptr[0] = _byteswap8(intptr[0]) return num diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index 8c13566c656b7..d24dc1f3db0e2 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -31,30 +31,30 @@ cdef struct Buffer: size_t length -cdef inline uint8_t buf_get(Buffer buf, size_t offset) except? 255: +cdef uint8_t buf_get(Buffer buf, size_t offset) except? 255: assert offset < buf.length, "Out of bounds read" return buf.data[offset] -cdef inline bint buf_set(Buffer buf, size_t offset, uint8_t value) except 0: +cdef bint buf_set(Buffer buf, size_t offset, uint8_t value) except 0: assert offset < buf.length, "Out of bounds write" buf.data[offset] = value return True -cdef inline bytes buf_as_bytes(Buffer buf, size_t offset, size_t length): +cdef bytes buf_as_bytes(Buffer buf, size_t offset, size_t length): assert offset + length <= buf.length, "Out of bounds read" return buf.data[offset:offset+length] -cdef inline Buffer buf_new(size_t length) except *: +cdef Buffer buf_new(size_t length) except *: cdef uint8_t *data = <uint8_t *>calloc(length, sizeof(uint8_t)) if data == NULL: raise MemoryError(f"Failed to allocate {length} bytes") return Buffer(data, length) -cdef inline buf_free(Buffer buf): +cdef buf_free(Buffer buf): if buf.data != NULL: free(buf.data) From dbb4986fb17e71d359a5bae9bf84668e5caedd78 Mon Sep 17 00:00:00 2001 From: Will Ayd Date: Mon, 28 Nov 2022 20:21:15 -0800 Subject: [PATCH 3/5] revert pxd --- pandas/_libs/tslibs/util.pxd | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd index 8f657183afcee..a28aace5d2f15 100644 --- a/pandas/_libs/tslibs/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -52,14 +52,14 @@ cdef extern from "numpy/npy_common.h": int64_t NPY_MIN_INT64 -cdef int64_t get_nat(): +cdef inline int64_t get_nat(): return NPY_MIN_INT64 # -------------------------------------------------------------------- # Type Checking -cdef bint is_integer_object(object obj) nogil: +cdef inline bint is_integer_object(object obj) nogil: """ Cython equivalent of @@ -81,7 +81,7 @@ cdef bint is_integer_object(object obj) nogil: and not is_timedelta64_object(obj)) -cdef bint is_float_object(object obj) nogil: +cdef inline bint is_float_object(object obj) nogil: """ Cython equivalent of `isinstance(val, (float, np.complex_))` @@
-97,7 +97,7 @@ cdef bint is_float_object(object obj) nogil: (PyObject_TypeCheck(obj, &PyFloatingArrType_Type))) -cdef bint is_complex_object(object obj) nogil: +cdef inline bint is_complex_object(object obj) nogil: """ Cython equivalent of `isinstance(val, (complex, np.complex_))` @@ -113,7 +113,7 @@ cdef bint is_complex_object(object obj) nogil: PyObject_TypeCheck(obj, &PyComplexFloatingArrType_Type)) -cdef bint is_bool_object(object obj) nogil: +cdef inline bint is_bool_object(object obj) nogil: """ Cython equivalent of `isinstance(val, (bool, np.bool_))` @@ -129,11 +129,11 @@ cdef bint is_bool_object(object obj) nogil: PyObject_TypeCheck(obj, &PyBoolArrType_Type)) -cdef bint is_real_number_object(object obj) nogil: +cdef inline bint is_real_number_object(object obj) nogil: return is_bool_object(obj) or is_integer_object(obj) or is_float_object(obj) -cdef bint is_timedelta64_object(object obj) nogil: +cdef inline bint is_timedelta64_object(object obj) nogil: """ Cython equivalent of `isinstance(val, np.timedelta64)` @@ -148,7 +148,7 @@ cdef bint is_timedelta64_object(object obj) nogil: return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) -cdef bint is_datetime64_object(object obj) nogil: +cdef inline bint is_datetime64_object(object obj) nogil: """ Cython equivalent of `isinstance(val, np.datetime64)` @@ -163,7 +163,7 @@ cdef bint is_datetime64_object(object obj) nogil: return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) -cdef bint is_array(object val): +cdef inline bint is_array(object val): """ Cython equivalent of `isinstance(val, np.ndarray)` @@ -178,7 +178,7 @@ cdef bint is_array(object val): return PyArray_Check(val) -cdef bint is_nan(object val): +cdef inline bint is_nan(object val): """ Check if val is a Not-A-Number float or complex, including float('NaN') and np.nan. 
@@ -198,7 +198,7 @@ cdef bint is_nan(object val): return is_complex_object(val) and val != val -cdef const char* get_c_string_buf_and_size(str py_string, +cdef inline const char* get_c_string_buf_and_size(str py_string, Py_ssize_t *length) except NULL: """ Extract internal char* buffer of unicode or bytes object `py_string` with @@ -221,15 +221,15 @@ cdef const char* get_c_string_buf_and_size(str py_string, return PyUnicode_AsUTF8AndSize(py_string, length) -cdef const char* get_c_string(str py_string) except NULL: +cdef inline const char* get_c_string(str py_string) except NULL: return get_c_string_buf_and_size(py_string, NULL) -cdef bytes string_encode_locale(str py_string): +cdef inline bytes string_encode_locale(str py_string): """As opposed to PyUnicode_Encode, use current system locale to encode.""" return PyUnicode_EncodeLocale(py_string, NULL) -cdef object char_to_string_locale(const char* data): +cdef inline object char_to_string_locale(const char* data): """As opposed to PyUnicode_FromString, use current system locale to decode.""" return PyUnicode_DecodeLocale(data, NULL) From 5b1d9220858b7aa3bf6f0cba30d80fed5d5017b4 Mon Sep 17 00:00:00 2001 From: Will Ayd Date: Tue, 29 Nov 2022 20:42:56 -0800 Subject: [PATCH 4/5] removed unused code --- pandas/_libs/window/aggregations.pyx | 47 ++++++++++------------------ 1 file changed, 16 insertions(+), 31 deletions(-) diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 07931022bf937..b0774e4bbb504 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -24,8 +24,6 @@ cnp.import_array() from pandas._libs.algos import is_monotonic -from pandas._libs.dtypes cimport numeric_t - cdef extern from "../src/skiplist.h": ctypedef struct node_t: @@ -993,46 +991,33 @@ def roll_median_c(const float64_t[:] values, ndarray[int64_t] start, # https://github.com/pydata/bottleneck -cdef numeric_t init_mm(numeric_t ai, Py_ssize_t *nobs, bint is_max) nogil: +cdef float64_t init_mm(float64_t ai, Py_ssize_t *nobs, bint is_max) nogil: - if numeric_t in cython.floating: - if ai == ai: - nobs[0] = nobs[0] + 1 - elif is_max: - if numeric_t == cython.float: - ai = MINfloat32 - else: - ai = MINfloat64 - else: - if numeric_t == cython.float: - ai = MAXfloat32 - else: - ai = MAXfloat64 - - else: + if ai == ai: nobs[0] = nobs[0] + 1 + elif is_max: + ai = MINfloat64 + else: + ai = MAXfloat64 return ai -cdef void remove_mm(numeric_t aold, Py_ssize_t *nobs) nogil: +cdef void remove_mm(float64_t aold, Py_ssize_t *nobs) nogil: """ remove a value from the mm calc """ - if numeric_t in cython.floating and aold == aold: + if aold == aold: nobs[0] = nobs[0] - 1 -cdef numeric_t calc_mm(int64_t minp, Py_ssize_t nobs, - numeric_t value) nogil: +cdef float64_t calc_mm(int64_t minp, Py_ssize_t nobs, + float64_t value) nogil: cdef: - numeric_t result + float64_t result - if numeric_t in cython.floating: - if nobs >= minp: - result = value - else: - result = NaN - else: + if nobs >= minp: result = value + else: + result = NaN return result @@ -1082,13 +1067,13 @@ def roll_min(ndarray[float64_t] values, ndarray[int64_t] start, return _roll_min_max(values, start, end, minp, is_max=0) -cdef _roll_min_max(ndarray[numeric_t] values, +cdef _roll_min_max(ndarray[float64_t] values, ndarray[int64_t] starti, ndarray[int64_t] endi, int64_t minp, bint is_max): cdef: - numeric_t ai + float64_t ai int64_t curr_win_size, start Py_ssize_t i, k, nobs = 0, N = len(starti) deque Q[int64_t] # min/max always the front From 
d521c551e4d313757b9b20a47157a9afffaa4acd Mon Sep 17 00:00:00 2001 From: Will Ayd Date: Wed, 30 Nov 2022 17:59:14 -0800 Subject: [PATCH 5/5] cython lint fixups --- pandas/_libs/hashing.pyx | 2 +- pandas/_libs/parsers.pyx | 58 ++++++------- pandas/_libs/tslibs/np_datetime.pyx | 4 +- pandas/_libs/tslibs/offsets.pyx | 6 +- pandas/_libs/tslibs/parsing.pyx | 2 +- pandas/_libs/tslibs/period.pyx | 6 +- pandas/_libs/window/aggregations.pyx | 117 +++++++++++++-------------- 7 files changed, 97 insertions(+), 98 deletions(-) diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index b4c2483e2d460..197ec99247b4a 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -126,7 +126,7 @@ cdef uint64_t u8to64_le(uint8_t* p) nogil: cdef void _sipround(uint64_t* v0, uint64_t* v1, - uint64_t* v2, uint64_t* v3) nogil: + uint64_t* v2, uint64_t* v3) nogil: v0[0] += v1[0] v1[0] = _rotl(v1[0], 13) v1[0] ^= v0[0] diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 5258471e8ff37..6c988d475708e 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -1073,9 +1073,9 @@ cdef class TextReader: # -> tuple["ArrayLike", int]: cdef _convert_tokens(self, Py_ssize_t i, int64_t start, - int64_t end, object name, bint na_filter, - kh_str_starts_t *na_hashset, - object na_flist, object col_dtype): + int64_t end, object name, bint na_filter, + kh_str_starts_t *na_hashset, + object na_flist, object col_dtype): if col_dtype is not None: col_res, na_count = self._convert_with_dtype( @@ -1578,8 +1578,8 @@ cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start, cdef void _to_fw_string_nogil(parser_t *parser, int64_t col, - int64_t line_start, int64_t line_end, - size_t width, char *data) nogil: + int64_t line_start, int64_t line_end, + size_t width, char *data) nogil: cdef: int64_t i coliter_t it @@ -1633,15 +1633,15 @@ cdef _try_double(parser_t *parser, int64_t col, cdef int _try_double_nogil(parser_t *parser, - float64_t (*double_converter)( - const char *, char **, char, - char, char, int, int *, int *) nogil, - int64_t col, int64_t line_start, int64_t line_end, - bint na_filter, kh_str_starts_t *na_hashset, - bint use_na_flist, - const kh_float64_t *na_flist, - float64_t NA, float64_t *data, - int *na_count) nogil: + float64_t (*double_converter)( + const char *, char **, char, + char, char, int, int *, int *) nogil, + int64_t col, int64_t line_start, int64_t line_end, + bint na_filter, kh_str_starts_t *na_hashset, + bint use_na_flist, + const kh_float64_t *na_flist, + float64_t NA, float64_t *data, + int *na_count) nogil: cdef: int error = 0, Py_ssize_t i, lines = line_end - line_start @@ -1742,10 +1742,10 @@ cdef _try_uint64(parser_t *parser, int64_t col, cdef int _try_uint64_nogil(parser_t *parser, int64_t col, - int64_t line_start, - int64_t line_end, bint na_filter, - const kh_str_starts_t *na_hashset, - uint64_t *data, uint_state *state) nogil: + int64_t line_start, + int64_t line_end, bint na_filter, + const kh_str_starts_t *na_hashset, + uint64_t *data, uint_state *state) nogil: cdef: int error Py_ssize_t i, lines = line_end - line_start @@ -1806,10 +1806,10 @@ cdef _try_int64(parser_t *parser, int64_t col, cdef int _try_int64_nogil(parser_t *parser, int64_t col, - int64_t line_start, - int64_t line_end, bint na_filter, - const kh_str_starts_t *na_hashset, int64_t NA, - int64_t *data, int *na_count) nogil: + int64_t line_start, + int64_t line_end, bint na_filter, + const kh_str_starts_t *na_hashset, int64_t NA, + int64_t *data, int *na_count) 
nogil: cdef: int error Py_ssize_t i, lines = line_end - line_start @@ -1869,13 +1869,13 @@ cdef _try_bool_flex(parser_t *parser, int64_t col, cdef int _try_bool_flex_nogil(parser_t *parser, int64_t col, - int64_t line_start, - int64_t line_end, bint na_filter, - const kh_str_starts_t *na_hashset, - const kh_str_starts_t *true_hashset, - const kh_str_starts_t *false_hashset, - uint8_t NA, uint8_t *data, - int *na_count) nogil: + int64_t line_start, + int64_t line_end, bint na_filter, + const kh_str_starts_t *na_hashset, + const kh_str_starts_t *true_hashset, + const kh_str_starts_t *false_hashset, + uint8_t NA, uint8_t *data, + int *na_count) nogil: cdef: int error = 0 Py_ssize_t i, lines = line_end - line_start diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 057748f2d6fb4..b2804c7401746 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -247,8 +247,8 @@ cdef void pydatetime_to_dtstruct(datetime dt, npy_datetimestruct *dts): cdef int64_t pydatetime_to_dt64(datetime val, - npy_datetimestruct *dts, - NPY_DATETIMEUNIT reso=NPY_FR_ns): + npy_datetimestruct *dts, + NPY_DATETIMEUNIT reso=NPY_FR_ns): """ Note we are assuming that the datetime object is timezone-naive. """ diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 30a30d6b65bc2..f63dc2a745f78 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -4376,9 +4376,9 @@ def roll_qtrday(other: datetime, n: int, month: int, cdef int _roll_qtrday(npy_datetimestruct* dts, - int n, - int months_since, - str day_opt) nogil except? -1: + int n, + int months_since, + str day_opt) nogil except? -1: """ See roll_qtrday.__doc__ """ diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index e567fab93eebe..0b62642948554 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -501,7 +501,7 @@ cpdef bint _does_string_look_like_datetime(str py_string): cdef object _parse_dateabbr_string(object date_string, datetime default, - str freq=None): + str freq=None): cdef: object ret # year initialized to prevent compiler warnings diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 44d504de2a005..3d3d28cda30ff 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -323,9 +323,9 @@ cdef int64_t downsample_daytime(int64_t ordinal, asfreq_info *af_info) nogil: cdef int64_t transform_via_day(int64_t ordinal, - asfreq_info *af_info, - freq_conv_func first_func, - freq_conv_func second_func) nogil: + asfreq_info *af_info, + freq_conv_func first_func, + freq_conv_func second_func) nogil: cdef: int64_t result diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index b0774e4bbb504..e9665f30bf745 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -1,6 +1,5 @@ # cython: boundscheck=False, wraparound=False, cdivision=True -cimport cython from libc.math cimport ( round, signbit, @@ -68,8 +67,8 @@ cdef bint is_monotonic_increasing_start_end_bounds( cdef float64_t calc_sum(int64_t minp, int64_t nobs, float64_t sum_x, - int64_t num_consecutive_same_value, float64_t prev_value - ) nogil: + int64_t num_consecutive_same_value, float64_t prev_value + ) nogil: cdef: float64_t result @@ -87,8 +86,8 @@ cdef float64_t calc_sum(int64_t minp, int64_t nobs, float64_t sum_x, cdef void add_sum(float64_t val, int64_t *nobs, float64_t *sum_x, - float64_t 
*compensation, int64_t *num_consecutive_same_value, - float64_t *prev_value) nogil: + float64_t *compensation, int64_t *num_consecutive_same_value, + float64_t *prev_value) nogil: """ add a value from the sum calc using Kahan summation """ cdef: @@ -112,7 +111,7 @@ cdef void add_sum(float64_t val, int64_t *nobs, float64_t *sum_x, cdef void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x, - float64_t *compensation) nogil: + float64_t *compensation) nogil: """ remove a value from the sum calc using Kahan summation """ cdef: @@ -187,8 +186,8 @@ def roll_sum(const float64_t[:] values, ndarray[int64_t] start, cdef float64_t calc_mean(int64_t minp, Py_ssize_t nobs, Py_ssize_t neg_ct, - float64_t sum_x, int64_t num_consecutive_same_value, - float64_t prev_value) nogil: + float64_t sum_x, int64_t num_consecutive_same_value, + float64_t prev_value) nogil: cdef: float64_t result @@ -242,7 +241,7 @@ cdef void add_mean( cdef void remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x, - Py_ssize_t *neg_ct, float64_t *compensation) nogil: + Py_ssize_t *neg_ct, float64_t *compensation) nogil: """ remove a value from the mean calc using Kahan summation """ cdef: float64_t y, t @@ -479,9 +478,9 @@ def roll_var(const float64_t[:] values, ndarray[int64_t] start, cdef float64_t calc_skew(int64_t minp, int64_t nobs, - float64_t x, float64_t xx, float64_t xxx, - int64_t num_consecutive_same_value - ) nogil: + float64_t x, float64_t xx, float64_t xxx, + int64_t num_consecutive_same_value + ) nogil: cdef: float64_t result, dnobs float64_t A, B, C, R @@ -520,14 +519,14 @@ cdef float64_t calc_skew(int64_t minp, int64_t nobs, cdef void add_skew(float64_t val, int64_t *nobs, - float64_t *x, float64_t *xx, - float64_t *xxx, - float64_t *compensation_x, - float64_t *compensation_xx, - float64_t *compensation_xxx, - int64_t *num_consecutive_same_value, - float64_t *prev_value, - ) nogil: + float64_t *x, float64_t *xx, + float64_t *xxx, + float64_t *compensation_x, + float64_t *compensation_xx, + float64_t *compensation_xxx, + int64_t *num_consecutive_same_value, + float64_t *prev_value, + ) nogil: """ add a value from the skew calc """ cdef: float64_t y, t @@ -559,11 +558,11 @@ cdef void add_skew(float64_t val, int64_t *nobs, cdef void remove_skew(float64_t val, int64_t *nobs, - float64_t *x, float64_t *xx, - float64_t *xxx, - float64_t *compensation_x, - float64_t *compensation_xx, - float64_t *compensation_xxx) nogil: + float64_t *x, float64_t *xx, + float64_t *xxx, + float64_t *compensation_x, + float64_t *compensation_xx, + float64_t *compensation_xxx) nogil: """ remove a value from the skew calc """ cdef: float64_t y, t @@ -677,10 +676,10 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start, cdef float64_t calc_kurt(int64_t minp, int64_t nobs, - float64_t x, float64_t xx, - float64_t xxx, float64_t xxxx, - int64_t num_consecutive_same_value, - ) nogil: + float64_t x, float64_t xx, + float64_t xxx, float64_t xxxx, + int64_t num_consecutive_same_value, + ) nogil: cdef: float64_t result, dnobs float64_t A, B, C, D, R, K @@ -723,15 +722,15 @@ cdef float64_t calc_kurt(int64_t minp, int64_t nobs, cdef void add_kurt(float64_t val, int64_t *nobs, - float64_t *x, float64_t *xx, - float64_t *xxx, float64_t *xxxx, - float64_t *compensation_x, - float64_t *compensation_xx, - float64_t *compensation_xxx, - float64_t *compensation_xxxx, - int64_t *num_consecutive_same_value, - float64_t *prev_value - ) nogil: + float64_t *x, float64_t *xx, + float64_t *xxx, float64_t *xxxx, + float64_t 
*compensation_x, + float64_t *compensation_xx, + float64_t *compensation_xxx, + float64_t *compensation_xxxx, + int64_t *num_consecutive_same_value, + float64_t *prev_value + ) nogil: """ add a value from the kurotic calc """ cdef: float64_t y, t @@ -767,12 +766,12 @@ cdef void add_kurt(float64_t val, int64_t *nobs, cdef void remove_kurt(float64_t val, int64_t *nobs, - float64_t *x, float64_t *xx, - float64_t *xxx, float64_t *xxxx, - float64_t *compensation_x, - float64_t *compensation_xx, - float64_t *compensation_xxx, - float64_t *compensation_xxxx) nogil: + float64_t *x, float64_t *xx, + float64_t *xxx, float64_t *xxxx, + float64_t *compensation_x, + float64_t *compensation_xx, + float64_t *compensation_xxx, + float64_t *compensation_xxxx) nogil: """ remove a value from the kurotic calc """ cdef: float64_t y, t @@ -1517,11 +1516,11 @@ cdef float64_t[:] _roll_weighted_sum_mean(const float64_t[:] values, cdef float64_t calc_weighted_var(float64_t t, - float64_t sum_w, - Py_ssize_t win_n, - unsigned int ddof, - float64_t nobs, - int64_t minp) nogil: + float64_t sum_w, + Py_ssize_t win_n, + unsigned int ddof, + float64_t nobs, + int64_t minp) nogil: """ Calculate weighted variance for a window using West's method. @@ -1568,11 +1567,11 @@ cdef float64_t calc_weighted_var(float64_t t, cdef void add_weighted_var(float64_t val, - float64_t w, - float64_t *t, - float64_t *sum_w, - float64_t *mean, - float64_t *nobs) nogil: + float64_t w, + float64_t *t, + float64_t *sum_w, + float64_t *mean, + float64_t *nobs) nogil: """ Update weighted mean, sum of weights and sum of weighted squared differences to include value and weight pair in weighted variance @@ -1614,11 +1613,11 @@ cdef void add_weighted_var(float64_t val, cdef void remove_weighted_var(float64_t val, - float64_t w, - float64_t *t, - float64_t *sum_w, - float64_t *mean, - float64_t *nobs) nogil: + float64_t w, + float64_t *t, + float64_t *sum_w, + float64_t *mean, + float64_t *nobs) nogil: """ Update weighted mean, sum of weights and sum of weighted squared differences to remove value and weight pair from weighted variance