From d5b59c59a0875f4071efa6502a56b0748d810b6a Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Sun, 4 May 2025 10:19:13 +0200
Subject: [PATCH 1/3] The ruff ruleset is TC, not TCH

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index f1c290e1b1..4a82dce1bd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -291,8 +291,8 @@ extend-exclude = [
 extend-select = [
     "ANN",  # flake8-annotations
     "B",    # flake8-bugbear
-    "EXE",  # flake8-executable
     "C4",   # flake8-comprehensions
+    "EXE",  # flake8-executable
     "FA",   # flake8-future-annotations
     "FLY",  # flynt
     "FURB", # refurb
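Patch 2/3 below quotes the type expression in every `typing.cast()` call, which is what rule TC006 enforces. The payoff: `cast()` evaluates its first argument at runtime, so a bare type expression forces the named types to be importable at runtime, while a quoted one is never evaluated and the imports can live behind `if TYPE_CHECKING:`. A minimal sketch of the before/after (hypothetical module, not taken from this patch):

    from typing import TYPE_CHECKING, cast

    if TYPE_CHECKING:
        # Only type checkers see this import; it is skipped at runtime.
        from collections.abc import Iterable

    def parse_ints(data: object) -> tuple[int, ...]:
        # Before: tuple(cast(Iterable[int], data)) -- raises NameError at
        # runtime, because Iterable is undefined outside TYPE_CHECKING.
        # After: the quoted form below is never evaluated, so it is safe.
        return tuple(cast("Iterable[int]", data))
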
From 3143e97f940bb605dc3241414475a9bdec52c58f Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Sun, 4 May 2025 10:08:41 +0200
Subject: [PATCH 2/3] Apply ruff/flake8-type-checking rule TC006

TC006 Add quotes to type expression in `typing.cast()`
---
 src/zarr/api/asynchronous.py         |  2 +-
 src/zarr/codecs/crc32c_.py           |  6 +++--
 src/zarr/codecs/sharding.py          |  2 +-
 src/zarr/codecs/transpose.py         |  2 +-
 src/zarr/core/array.py               | 24 ++++++++++----------
 src/zarr/core/array_spec.py          |  2 +-
 src/zarr/core/buffer/core.py         | 12 +++++-----
 src/zarr/core/buffer/gpu.py          |  4 ++--
 src/zarr/core/chunk_key_encodings.py |  4 ++--
 src/zarr/core/common.py              |  4 ++--
 src/zarr/core/config.py              |  2 +-
 src/zarr/core/group.py               |  9 ++++----
 src/zarr/core/indexing.py            | 34 ++++++++++++++--------------
 src/zarr/core/metadata/v2.py         |  6 ++---
 src/zarr/core/metadata/v3.py         |  6 ++---
 src/zarr/core/strings.py             |  4 ++--
 src/zarr/testing/utils.py            |  2 +-
 17 files changed, 63 insertions(+), 62 deletions(-)

diff --git a/src/zarr/api/asynchronous.py b/src/zarr/api/asynchronous.py
index 4f3c9c3f8f..5b9c0bee3d 100644
--- a/src/zarr/api/asynchronous.py
+++ b/src/zarr/api/asynchronous.py
@@ -329,7 +329,7 @@ async def open(
     try:
         metadata_dict = await get_array_metadata(store_path, zarr_format=zarr_format)
         # TODO: remove this cast when we fix typing for array metadata dicts
-        _metadata_dict = cast(ArrayMetadataDict, metadata_dict)
+        _metadata_dict = cast("ArrayMetadataDict", metadata_dict)
         # for v2, the above would already have raised an exception if not an array
         zarr_format = _metadata_dict["zarr_format"]
         is_v3_array = zarr_format == 3 and _metadata_dict.get("node_type") == "array"
diff --git a/src/zarr/codecs/crc32c_.py b/src/zarr/codecs/crc32c_.py
index ab8a57eba7..6da673ceac 100644
--- a/src/zarr/codecs/crc32c_.py
+++ b/src/zarr/codecs/crc32c_.py
@@ -40,7 +40,9 @@ async def _decode_single(
         inner_bytes = data[:-4]

         # Need to do a manual cast until https://github.com/numpy/numpy/issues/26783 is resolved
-        computed_checksum = np.uint32(crc32c(cast(typing_extensions.Buffer, inner_bytes))).tobytes()
+        computed_checksum = np.uint32(
+            crc32c(cast("typing_extensions.Buffer", inner_bytes))
+        ).tobytes()
         stored_checksum = bytes(crc32_bytes)
         if computed_checksum != stored_checksum:
             raise ValueError(
@@ -55,7 +57,7 @@ async def _encode_single(
     ) -> Buffer | None:
         data = chunk_bytes.as_numpy_array()
         # Calculate the checksum and "cast" it to a numpy array
-        checksum = np.array([crc32c(cast(typing_extensions.Buffer, data))], dtype=np.uint32)
+        checksum = np.array([crc32c(cast("typing_extensions.Buffer", data))], dtype=np.uint32)
         # Append the checksum (as bytes) to the data
         return chunk_spec.prototype.buffer.from_array_like(np.append(data, checksum.view("B")))
diff --git a/src/zarr/codecs/sharding.py b/src/zarr/codecs/sharding.py
index bee36b3160..4638d973cb 100644
--- a/src/zarr/codecs/sharding.py
+++ b/src/zarr/codecs/sharding.py
@@ -115,7 +115,7 @@ class _ShardIndex(NamedTuple):
     def chunks_per_shard(self) -> ChunkCoords:
         result = tuple(self.offsets_and_lengths.shape[0:-1])
         # The cast is required until https://github.com/numpy/numpy/pull/27211 is merged
-        return cast(ChunkCoords, result)
+        return cast("ChunkCoords", result)

     def _localize_chunk(self, chunk_coords: ChunkCoords) -> ChunkCoords:
         return tuple(
diff --git a/src/zarr/codecs/transpose.py b/src/zarr/codecs/transpose.py
index 1aa1eb40e2..85e4526b8b 100644
--- a/src/zarr/codecs/transpose.py
+++ b/src/zarr/codecs/transpose.py
@@ -23,7 +23,7 @@ def parse_transpose_order(data: JSON | Iterable[int]) -> tuple[int, ...]:
         raise TypeError(f"Expected an iterable. Got {data} instead.")
     if not all(isinstance(a, int) for a in data):
         raise TypeError(f"Expected an iterable of integers. Got {data} instead.")
-    return tuple(cast(Iterable[int], data))
+    return tuple(cast("Iterable[int]", data))


 @dataclass(frozen=True)
diff --git a/src/zarr/core/array.py b/src/zarr/core/array.py
index 78b5e92ed6..abc95d7416 100644
--- a/src/zarr/core/array.py
+++ b/src/zarr/core/array.py
@@ -903,7 +903,7 @@ async def open(
         store_path = await make_store_path(store)
         metadata_dict = await get_array_metadata(store_path, zarr_format=zarr_format)
         # TODO: remove this cast when we have better type hints
-        _metadata_dict = cast(ArrayV3MetadataDict, metadata_dict)
+        _metadata_dict = cast("ArrayV3MetadataDict", metadata_dict)
         return cls(store_path=store_path, metadata=_metadata_dict)

     @property
@@ -1399,7 +1399,7 @@ async def _set_selection(
         if isinstance(array_like, np._typing._SupportsArrayFunc):
             # TODO: need to handle array types that don't support __array_function__
             # like PyTorch and JAX
-            array_like_ = cast(np._typing._SupportsArrayFunc, array_like)
+            array_like_ = cast("np._typing._SupportsArrayFunc", array_like)
             value = np.asanyarray(value, dtype=self.metadata.dtype, like=array_like_)
         else:
             if not hasattr(value, "shape"):
@@ -1413,7 +1413,7 @@ async def _set_selection(
                 value = value.astype(dtype=self.metadata.dtype, order="A")
             else:
                 value = np.array(value, dtype=self.metadata.dtype, order="A")
-        value = cast(NDArrayLike, value)
+        value = cast("NDArrayLike", value)
         # We accept any ndarray like object from the user and convert it
         # to a NDBuffer (or subclass). From this point onwards, we only pass
         # Buffer and NDBuffer between components.
@@ -2436,11 +2436,11 @@ def __getitem__(self, selection: Selection) -> NDArrayLikeOrScalar:
         """
         fields, pure_selection = pop_fields(selection)
         if is_pure_fancy_indexing(pure_selection, self.ndim):
-            return self.vindex[cast(CoordinateSelection | MaskSelection, selection)]
+            return self.vindex[cast("CoordinateSelection | MaskSelection", selection)]
         elif is_pure_orthogonal_indexing(pure_selection, self.ndim):
             return self.get_orthogonal_selection(pure_selection, fields=fields)
         else:
-            return self.get_basic_selection(cast(BasicSelection, pure_selection), fields=fields)
+            return self.get_basic_selection(cast("BasicSelection", pure_selection), fields=fields)

     def __setitem__(self, selection: Selection, value: npt.ArrayLike) -> None:
         """Modify data for an item or region of the array.
@@ -2535,11 +2535,11 @@ def __setitem__(self, selection: Selection, value: npt.ArrayLike) -> None:
         """
         fields, pure_selection = pop_fields(selection)
         if is_pure_fancy_indexing(pure_selection, self.ndim):
-            self.vindex[cast(CoordinateSelection | MaskSelection, selection)] = value
+            self.vindex[cast("CoordinateSelection | MaskSelection", selection)] = value
         elif is_pure_orthogonal_indexing(pure_selection, self.ndim):
             self.set_orthogonal_selection(pure_selection, value, fields=fields)
         else:
-            self.set_basic_selection(cast(BasicSelection, pure_selection), value, fields=fields)
+            self.set_basic_selection(cast("BasicSelection", pure_selection), value, fields=fields)

     @_deprecate_positional_args
     def get_basic_selection(
@@ -3657,7 +3657,7 @@ def update_attributes(self, new_attributes: dict[str, JSON]) -> Array:
         # TODO: remove this cast when type inference improves
         new_array = sync(self._async_array.update_attributes(new_attributes))
         # TODO: remove this cast when type inference improves
-        _new_array = cast(AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata], new_array)
+        _new_array = cast("AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]", new_array)
         return type(self)(_new_array)

     def __repr__(self) -> str:
@@ -4252,7 +4252,7 @@ async def init_array(
         serializer=serializer,
         dtype=dtype_parsed,
     )
-    sub_codecs = cast(tuple[Codec, ...], (*array_array, array_bytes, *bytes_bytes))
+    sub_codecs = cast("tuple[Codec, ...]", (*array_array, array_bytes, *bytes_bytes))
     codecs_out: tuple[Codec, ...]
     if shard_shape_parsed is not None:
         index_location = None
@@ -4523,7 +4523,7 @@ def _parse_keep_array_attr(
             compressors = "auto"
     if serializer == "keep":
         if zarr_format == 3 and data.metadata.zarr_format == 3:
-            serializer = cast(SerializerLike, data.serializer)
+            serializer = cast("SerializerLike", data.serializer)
         else:
             serializer = "auto"
     if fill_value is None:
@@ -4701,7 +4701,7 @@ def _parse_chunk_encoding_v3(
     if isinstance(filters, dict | Codec):
         maybe_array_array = (filters,)
     else:
-        maybe_array_array = cast(Iterable[Codec | dict[str, JSON]], filters)
+        maybe_array_array = cast("Iterable[Codec | dict[str, JSON]]", filters)
     out_array_array = tuple(_parse_array_array_codec(c) for c in maybe_array_array)

     if serializer == "auto":
@@ -4718,7 +4718,7 @@ def _parse_chunk_encoding_v3(
     if isinstance(compressors, dict | Codec):
         maybe_bytes_bytes = (compressors,)
     else:
-        maybe_bytes_bytes = cast(Iterable[Codec | dict[str, JSON]], compressors)
+        maybe_bytes_bytes = cast("Iterable[Codec | dict[str, JSON]]", compressors)

     out_bytes_bytes = tuple(_parse_bytes_bytes_codec(c) for c in maybe_bytes_bytes)

diff --git a/src/zarr/core/array_spec.py b/src/zarr/core/array_spec.py
index 59d3cc6b40..6cd27b30eb 100644
--- a/src/zarr/core/array_spec.py
+++ b/src/zarr/core/array_spec.py
@@ -64,7 +64,7 @@ def from_dict(cls, data: ArrayConfigParams) -> Self:
         """
         kwargs_out: ArrayConfigParams = {}
         for f in fields(ArrayConfig):
-            field_name = cast(Literal["order", "write_empty_chunks"], f.name)
+            field_name = cast("Literal['order', 'write_empty_chunks']", f.name)
             if field_name not in data:
                 kwargs_out[field_name] = zarr_config.get(f"array.{field_name}")
             else:
diff --git a/src/zarr/core/buffer/core.py b/src/zarr/core/buffer/core.py
index 94cd91f026..d0a2d992d2 100644
--- a/src/zarr/core/buffer/core.py
+++ b/src/zarr/core/buffer/core.py
@@ -159,7 +159,7 @@ def create_zero_length(cls) -> Self:
         if cls is Buffer:
             raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'")
         return cls(
-            cast(ArrayLike, None)
+            cast("ArrayLike", None)
         )  # This line will never be reached, but it satisfies the type checker

     @classmethod
@@ -207,7 +207,7 @@ def from_buffer(cls, buffer: Buffer) -> Self:
         if cls is Buffer:
             raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'")
         return cls(
-            cast(ArrayLike, None)
+            cast("ArrayLike", None)
         )  # This line will never be reached, but it satisfies the type checker

     @classmethod
@@ -227,7 +227,7 @@ def from_bytes(cls, bytes_like: BytesLike) -> Self:
         if cls is Buffer:
             raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'")
         return cls(
-            cast(ArrayLike, None)
+            cast("ArrayLike", None)
         )  # This line will never be reached, but it satisfies the type checker

     def as_array_like(self) -> ArrayLike:
@@ -371,7 +371,7 @@ def create(
                 "Cannot call abstract method on the abstract class 'NDBuffer'"
             )
         return cls(
-            cast(NDArrayLike, None)
+            cast("NDArrayLike", None)
         )  # This line will never be reached, but it satisfies the type checker

     @classmethod
@@ -408,7 +408,7 @@ def from_numpy_array(cls, array_like: npt.ArrayLike) -> Self:
                 "Cannot call abstract method on the abstract class 'NDBuffer'"
             )
         return cls(
-            cast(NDArrayLike, None)
+            cast("NDArrayLike", None)
         )  # This line will never be reached, but it satisfies the type checker

     def as_ndarray_like(self) -> NDArrayLike:
@@ -440,7 +440,7 @@ def as_scalar(self) -> ScalarType:
         """Returns the buffer as a scalar value"""
         if self._data.size != 1:
             raise ValueError("Buffer does not contain a single scalar value")
-        return cast(ScalarType, self.as_numpy_array()[()])
+        return cast("ScalarType", self.as_numpy_array()[()])

     @property
     def dtype(self) -> np.dtype[Any]:
diff --git a/src/zarr/core/buffer/gpu.py b/src/zarr/core/buffer/gpu.py
index 77d2731c71..88746c5fac 100644
--- a/src/zarr/core/buffer/gpu.py
+++ b/src/zarr/core/buffer/gpu.py
@@ -103,7 +103,7 @@ def from_bytes(cls, bytes_like: BytesLike) -> Self:
         return cls.from_array_like(cp.frombuffer(bytes_like, dtype="B"))

     def as_numpy_array(self) -> npt.NDArray[Any]:
-        return cast(npt.NDArray[Any], cp.asnumpy(self._data))
+        return cast("npt.NDArray[Any]", cp.asnumpy(self._data))

     def __add__(self, other: core.Buffer) -> Self:
         other_array = other.as_array_like()
@@ -204,7 +204,7 @@ def as_numpy_array(self) -> npt.NDArray[Any]:
         -------
             NumPy array of this buffer (might be a data copy)
         """
-        return cast(npt.NDArray[Any], cp.asnumpy(self._data))
+        return cast("npt.NDArray[Any]", cp.asnumpy(self._data))

     def __getitem__(self, key: Any) -> Self:
         return self.__class__(self._data.__getitem__(key))
diff --git a/src/zarr/core/chunk_key_encodings.py b/src/zarr/core/chunk_key_encodings.py
index 103472c3b4..91dfc90365 100644
--- a/src/zarr/core/chunk_key_encodings.py
+++ b/src/zarr/core/chunk_key_encodings.py
@@ -20,7 +20,7 @@ def parse_separator(data: JSON) -> SeparatorLiteral:
     if data not in (".", "/"):
         raise ValueError(f"Expected an '.' or '/' separator. Got {data} instead.")
-    return cast(SeparatorLiteral, data)
+    return cast("SeparatorLiteral", data)


 class ChunkKeyEncodingParams(TypedDict):
@@ -48,7 +48,7 @@ def from_dict(cls, data: dict[str, JSON] | ChunkKeyEncodingLike) -> ChunkKeyEncoding:
             data = {"name": data["name"], "configuration": {"separator": data["separator"]}}

         # TODO: remove this cast when we are statically typing the JSON metadata completely.
-        data = cast(dict[str, JSON], data)
+        data = cast("dict[str, JSON]", data)

         # configuration is optional for chunk key encodings
         name_parsed, config_parsed = parse_named_configuration(data, require_configuration=False)
diff --git a/src/zarr/core/common.py b/src/zarr/core/common.py
index a670834206..be37dc5109 100644
--- a/src/zarr/core/common.py
+++ b/src/zarr/core/common.py
@@ -158,7 +158,7 @@ def parse_fill_value(data: Any) -> Any:

 def parse_order(data: Any) -> Literal["C", "F"]:
     if data in ("C", "F"):
-        return cast(Literal["C", "F"], data)
+        return cast("Literal['C', 'F']", data)
     raise ValueError(f"Expected one of ('C', 'F'), got {data} instead.")

@@ -202,4 +202,4 @@ def _warn_order_kwarg() -> None:

 def _default_zarr_format() -> ZarrFormat:
     """Return the default zarr_version"""
-    return cast(ZarrFormat, int(zarr_config.get("default_zarr_format", 3)))
+    return cast("ZarrFormat", int(zarr_config.get("default_zarr_format", 3)))
diff --git a/src/zarr/core/config.py b/src/zarr/core/config.py
index c565cb0708..2a10943d80 100644
--- a/src/zarr/core/config.py
+++ b/src/zarr/core/config.py
@@ -134,6 +134,6 @@ def enable_gpu(self) -> ConfigSet:

 def parse_indexing_order(data: Any) -> Literal["C", "F"]:
     if data in ("C", "F"):
-        return cast(Literal["C", "F"], data)
+        return cast("Literal['C', 'F']", data)
     msg = f"Expected one of ('C', 'F'), got {data} instead."
     raise ValueError(msg)
diff --git a/src/zarr/core/group.py b/src/zarr/core/group.py
index 5c470e29ca..4124fd024b 100644
--- a/src/zarr/core/group.py
+++ b/src/zarr/core/group.py
@@ -81,7 +81,7 @@ def parse_zarr_format(data: Any) -> ZarrFormat:
     """Parse the zarr_format field from metadata."""
     if data in (2, 3):
-        return cast(ZarrFormat, data)
+        return cast("ZarrFormat", data)
     msg = f"Invalid zarr_format. Expected one of 2 or 3. Got {data}."
     raise ValueError(msg)

@@ -89,7 +89,7 @@ def parse_zarr_format(data: Any) -> ZarrFormat:
 def parse_node_type(data: Any) -> NodeType:
     """Parse the node_type field from metadata."""
     if data in ("array", "group"):
-        return cast(Literal["array", "group"], data)
+        return cast("Literal['array', 'group']", data)
     raise MetadataValidationError("node_type", "array or group", data)

@@ -362,7 +362,7 @@ def to_buffer_dict(self, prototype: BufferPrototype) -> dict[str, Buffer]:
                 # it's an array
                 if isinstance(v.get("fill_value", None), np.void):
                     v["fill_value"] = base64.standard_b64encode(
-                        cast(bytes, v["fill_value"])
+                        cast("bytes", v["fill_value"])
                     ).decode("ascii")
                 else:
                     v = _replace_special_floats(v)
@@ -3246,8 +3246,7 @@ def _ensure_consistent_zarr_format(
         raise ValueError(msg)

     return cast(
-        Mapping[str, GroupMetadata | ArrayV2Metadata]
-        | Mapping[str, GroupMetadata | ArrayV3Metadata],
+        "Mapping[str, GroupMetadata | ArrayV2Metadata] | Mapping[str, GroupMetadata | ArrayV3Metadata]",
         data,
     )
diff --git a/src/zarr/core/indexing.py b/src/zarr/core/indexing.py
index 998fe156a1..c11889f7f4 100644
--- a/src/zarr/core/indexing.py
+++ b/src/zarr/core/indexing.py
@@ -466,7 +466,7 @@ def replace_ellipsis(selection: Any, shape: ChunkCoords) -> SelectionNormalized:
     # check selection not too long
     check_selection_length(selection, shape)

-    return cast(SelectionNormalized, selection)
+    return cast("SelectionNormalized", selection)


 def replace_lists(selection: SelectionNormalized) -> SelectionNormalized:
@@ -481,7 +481,7 @@ def replace_lists(selection: SelectionNormalized) -> SelectionNormalized:
 def ensure_tuple(v: Any) -> SelectionNormalized:
     if not isinstance(v, tuple):
         v = (v,)
-    return cast(SelectionNormalized, v)
+    return cast("SelectionNormalized", v)


 class ChunkProjection(NamedTuple):
@@ -818,7 +818,7 @@ def ix_(selection: Any, shape: ChunkCoords) -> npt.NDArray[np.intp]:

     # now get numpy to convert to a coordinate selection
     selection = np.ix_(*selection)

-    return cast(npt.NDArray[np.intp], selection)
+    return cast("npt.NDArray[np.intp]", selection)


 def oindex(a: npt.NDArray[Any], selection: Selection) -> npt.NDArray[Any]:
@@ -948,7 +948,7 @@ def __getitem__(self, selection: OrthogonalSelection | Array) -> NDArrayLikeOrScalar:
         new_selection = ensure_tuple(new_selection)
         new_selection = replace_lists(new_selection)
         return self.array.get_orthogonal_selection(
-            cast(OrthogonalSelection, new_selection), fields=fields
+            cast("OrthogonalSelection", new_selection), fields=fields
         )

     def __setitem__(self, selection: OrthogonalSelection, value: npt.ArrayLike) -> None:
@@ -956,7 +956,7 @@ def __setitem__(self, selection: OrthogonalSelection, value: npt.ArrayLike) -> None:
         new_selection = ensure_tuple(new_selection)
         new_selection = replace_lists(new_selection)
         return self.array.set_orthogonal_selection(
-            cast(OrthogonalSelection, new_selection), value, fields=fields
+            cast("OrthogonalSelection", new_selection), value, fields=fields
         )

@@ -1050,14 +1050,14 @@ def __getitem__(self, selection: BasicSelection) -> NDArrayLikeOrScalar:
         fields, new_selection = pop_fields(selection)
         new_selection = ensure_tuple(new_selection)
         new_selection = replace_lists(new_selection)
-        return self.array.get_block_selection(cast(BasicSelection, new_selection), fields=fields)
+        return self.array.get_block_selection(cast("BasicSelection", new_selection), fields=fields)

     def __setitem__(self, selection: BasicSelection, value: npt.ArrayLike) -> None:
         fields, new_selection = pop_fields(selection)
         new_selection = ensure_tuple(new_selection)
         new_selection = replace_lists(new_selection)
         return self.array.set_block_selection(
-            cast(BasicSelection, new_selection), value, fields=fields
+            cast("BasicSelection", new_selection), value, fields=fields
         )

@@ -1105,12 +1105,12 @@ def __init__(
         nchunks = reduce(operator.mul, cdata_shape, 1)

         # some initial normalization
-        selection_normalized = cast(CoordinateSelectionNormalized, ensure_tuple(selection))
+        selection_normalized = cast("CoordinateSelectionNormalized", ensure_tuple(selection))
         selection_normalized = tuple(
             np.asarray([i]) if is_integer(i) else i for i in selection_normalized
         )
         selection_normalized = cast(
-            CoordinateSelectionNormalized, replace_lists(selection_normalized)
+            "CoordinateSelectionNormalized", replace_lists(selection_normalized)
         )

         # validation
@@ -1214,8 +1214,8 @@ def __iter__(self) -> Iterator[ChunkProjection]:
 class MaskIndexer(CoordinateIndexer):
     def __init__(self, selection: MaskSelection, shape: ChunkCoords, chunk_grid: ChunkGrid) -> None:
         # some initial normalization
-        selection_normalized = cast(tuple[MaskSelection], ensure_tuple(selection))
-        selection_normalized = cast(tuple[MaskSelection], replace_lists(selection_normalized))
+        selection_normalized = cast("tuple[MaskSelection]", ensure_tuple(selection))
+        selection_normalized = cast("tuple[MaskSelection]", replace_lists(selection_normalized))

         # validation
         if not is_mask_selection(selection_normalized, shape):
@@ -1311,14 +1311,14 @@ def pop_fields(selection: SelectionWithFields) -> tuple[Fields | None, Selection]:
     elif not isinstance(selection, tuple):
         # single selection item, no fields
         # leave selection as-is
-        return None, cast(Selection, selection)
+        return None, cast("Selection", selection)
     else:
         # multiple items, split fields from selection items
         fields: Fields = [f for f in selection if isinstance(f, str)]
         fields = fields[0] if len(fields) == 1 else fields
         selection_tuple = tuple(s for s in selection if not isinstance(s, str))
         selection = cast(
-            Selection, selection_tuple[0] if len(selection_tuple) == 1 else selection_tuple
+            "Selection", selection_tuple[0] if len(selection_tuple) == 1 else selection_tuple
         )
         return fields, selection

@@ -1380,12 +1380,12 @@ def get_indexer(
         new_selection = ensure_tuple(selection)
         new_selection = replace_lists(new_selection)
         if is_coordinate_selection(new_selection, shape):
-            return CoordinateIndexer(cast(CoordinateSelection, selection), shape, chunk_grid)
+            return CoordinateIndexer(cast("CoordinateSelection", selection), shape, chunk_grid)
         elif is_mask_selection(new_selection, shape):
-            return MaskIndexer(cast(MaskSelection, selection), shape, chunk_grid)
+            return MaskIndexer(cast("MaskSelection", selection), shape, chunk_grid)
         else:
             raise VindexInvalidSelectionError(new_selection)
     elif is_pure_orthogonal_indexing(pure_selection, len(shape)):
-        return OrthogonalIndexer(cast(OrthogonalSelection, selection), shape, chunk_grid)
+        return OrthogonalIndexer(cast("OrthogonalSelection", selection), shape, chunk_grid)
     else:
-        return BasicIndexer(cast(BasicSelection, selection), shape, chunk_grid)
+        return BasicIndexer(cast("BasicSelection", selection), shape, chunk_grid)
diff --git a/src/zarr/core/metadata/v2.py b/src/zarr/core/metadata/v2.py
index 029a3e09a7..a8f4f4abb4 100644
--- a/src/zarr/core/metadata/v2.py
+++ b/src/zarr/core/metadata/v2.py
@@ -378,7 +378,7 @@ def _serialize_fill_value(fill_value: Any, dtype: np.dtype[Any]) -> JSON:
         # There's a relationship between dtype and fill_value
         # that mypy isn't aware of. The fact that we have S or V dtype here
         # means we should have a bytes-type fill_value.
-        serialized = base64.standard_b64encode(cast(bytes, fill_value)).decode("ascii")
+        serialized = base64.standard_b64encode(cast("bytes", fill_value)).decode("ascii")
     elif isinstance(fill_value, np.datetime64):
         serialized = np.datetime_as_string(fill_value)
     elif isinstance(fill_value, numbers.Integral):
@@ -448,7 +448,7 @@ def _default_compressor(
     else:
         raise ValueError(f"Unsupported dtype kind {dtype.kind}")

-    return cast(dict[str, JSON] | None, default_compressor.get(dtype_key, None))
+    return cast("dict[str, JSON] | None", default_compressor.get(dtype_key, None))


 def _default_filters(
@@ -470,4 +470,4 @@ def _default_filters(
     else:
         raise ValueError(f"Unsupported dtype kind {dtype.kind}")

-    return cast(list[dict[str, JSON]] | None, default_filters.get(dtype_key, None))
+    return cast("list[dict[str, JSON]] | None", default_filters.get(dtype_key, None))
diff --git a/src/zarr/core/metadata/v3.py b/src/zarr/core/metadata/v3.py
index 63f6515e44..dcbf44f89b 100644
--- a/src/zarr/core/metadata/v3.py
+++ b/src/zarr/core/metadata/v3.py
@@ -273,7 +273,7 @@ def __init__(
             fill_value = default_fill_value(data_type_parsed)
         # we pass a string here rather than an enum to make mypy happy
         fill_value_parsed = parse_fill_value(
-            fill_value, dtype=cast(ALL_DTYPES, data_type_parsed.value)
+            fill_value, dtype=cast("ALL_DTYPES", data_type_parsed.value)
         )
         attributes_parsed = parse_attributes(attributes)
         codecs_parsed_partial = parse_codecs(codecs)
@@ -524,7 +524,7 @@ def parse_fill_value(
             return np.bytes_(fill_value)

     # the rest are numeric types
-    np_dtype = cast(np.dtype[Any], data_type.to_numpy())
+    np_dtype = cast("np.dtype[Any]", data_type.to_numpy())

     if isinstance(fill_value, Sequence) and not isinstance(fill_value, str):
         if data_type in (DataType.complex64, DataType.complex128):
@@ -588,7 +588,7 @@ def default_fill_value(dtype: DataType) -> str | bytes | np.generic:
         return b""
     else:
         np_dtype = dtype.to_numpy()
-        np_dtype = cast(np.dtype[Any], np_dtype)
+        np_dtype = cast("np.dtype[Any]", np_dtype)
         return np_dtype.type(0)  # type: ignore[misc]
diff --git a/src/zarr/core/strings.py b/src/zarr/core/strings.py
index ffca0c3b0c..15c5fddfee 100644
--- a/src/zarr/core/strings.py
+++ b/src/zarr/core/strings.py
@@ -30,7 +30,7 @@ def cast_array(
         data: np.ndarray[Any, np.dtype[Any]],
     ) -> np.ndarray[Any, np.dtypes.StringDType | np.dtypes.ObjectDType]:
         out = data.astype(_STRING_DTYPE, copy=False)
-        return cast(np.ndarray[Any, np.dtypes.StringDType], out)
+        return cast("np.ndarray[Any, np.dtypes.StringDType]", out)

 except AttributeError:
     # if not available, we fall back on an object array of strings, as in Zarr < 3
@@ -41,7 +41,7 @@ def cast_array(
         data: np.ndarray[Any, np.dtype[Any]],
     ) -> np.ndarray[Any, Union["np.dtypes.StringDType", "np.dtypes.ObjectDType"]]:
         out = data.astype(_STRING_DTYPE, copy=False)
-        return cast(np.ndarray[Any, np.dtypes.ObjectDType], out)
+        return cast("np.ndarray[Any, np.dtypes.ObjectDType]", out)


 def cast_to_string_dtype(
diff --git a/src/zarr/testing/utils.py b/src/zarr/testing/utils.py
index 7cf57ab9d6..afc15d742c 100644
--- a/src/zarr/testing/utils.py
+++ b/src/zarr/testing/utils.py
@@ -30,7 +30,7 @@ def has_cupy() -> bool:
     try:
         import cupy

-        return cast(bool, cupy.cuda.runtime.getDeviceCount() > 0)
+        return cast("bool", cupy.cuda.runtime.getDeviceCount() > 0)
     except ImportError:
         return False
     except cupy.cuda.runtime.CUDARuntimeError:
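Once every cast is quoted, `src/zarr/core/group.py` no longer needs `Iterator` or `Mapping` at runtime, which is exactly what rule TC003 flags in patch 3/3 below: standard library imports that are only used in type positions. A minimal sketch of the pattern (hypothetical module, not taken from this patch):

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Resolved by type checkers only; never imported at runtime.
        from collections.abc import Iterator

    def count_up(limit: int) -> Iterator[int]:
        # Under PEP 563 the annotation above stays a string, so Iterator
        # does not need to exist when this function is defined.
        yield from range(limit)
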
From 2af5bcfa092a28f993ed6cd9673fb22a72656a70 Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Sun, 4 May 2025 10:22:47 +0200
Subject: [PATCH 3/3] Apply ruff/flake8-type-checking rule TC003

TC003 Move standard library import into a type-checking block
---
 src/zarr/core/group.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/zarr/core/group.py b/src/zarr/core/group.py
index 4124fd024b..86cc6a3c6b 100644
--- a/src/zarr/core/group.py
+++ b/src/zarr/core/group.py
@@ -7,7 +7,6 @@
 import logging
 import warnings
 from collections import defaultdict
-from collections.abc import Iterator, Mapping
 from dataclasses import asdict, dataclass, field, fields, replace
 from itertools import accumulate
 from typing import TYPE_CHECKING, Literal, TypeVar, assert_never, cast, overload
@@ -65,6 +64,8 @@
         Coroutine,
         Generator,
         Iterable,
+        Iterator,
+        Mapping,
     )
     from typing import Any
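
Both rules lean on the same runtime guarantee: `typing.cast()` simply returns its second argument and never inspects the first, so a quoted type expression costs nothing and does not even have to name something importable. A quick illustration (not part of the patch series):

    from typing import cast

    value = cast("SomeTypeThatExistsNowhere", 42)
    assert value == 42  # at runtime, cast() is an identity function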