Manual deduction of partitions #1743

Open, wants to merge 3 commits into main.

pyiceberg/io/pyarrow.py (20 additions & 4 deletions)

@@ -2254,6 +2254,7 @@ def data_file_statistics_from_parquet_metadata(
     parquet_metadata: pq.FileMetaData,
     stats_columns: Dict[int, StatisticsCollector],
     parquet_column_mapping: Dict[str, int],
+    check_schema: bool = True,
 ) -> DataFileStatistics:
     """
     Compute and return DataFileStatistics that includes the following.
@@ -2299,7 +2300,15 @@ def data_file_statistics_from_parquet_metadata(

         for pos in range(parquet_metadata.num_columns):
             column = row_group.column(pos)
-            field_id = parquet_column_mapping[column.path_in_schema]
+            try:
+                field_id = parquet_column_mapping[column.path_in_schema]
+            except KeyError:
+                if check_schema:
+                    raise
+                else:
+                    logger.warning("PyArrow column %d missing in current schema", pos)
+                    continue

         stats_col = stats_columns[field_id]

@@ -2464,7 +2473,7 @@ def _check_pyarrow_schema_compatible(
     _check_schema_compatible(requested_schema, provided_schema)


-def parquet_files_to_data_files(io: FileIO, table_metadata: TableMetadata, file_paths: Iterator[str]) -> Iterator[DataFile]:
+def parquet_files_to_data_files(io: FileIO, table_metadata: TableMetadata, file_paths: Iterator[str], check_schema: bool = True, partition_deductor: Callable[[str], Record] | None = None) -> Iterator[DataFile]:
     for file_path in file_paths:
         input_file = io.new_input(file_path)
         with input_file.open() as input_stream:
@@ -2475,18 +2484,25 @@ def parquet_files_to_data_files(io: FileIO, table_metadata: TableMetadata, file_
                 f"Cannot add file {file_path} because it has field IDs. `add_files` only supports addition of files without field_ids"
             )
         schema = table_metadata.schema()
-        _check_pyarrow_schema_compatible(schema, parquet_metadata.schema.to_arrow_schema())
+        if check_schema:
+            _check_pyarrow_schema_compatible(schema, parquet_metadata.schema.to_arrow_schema())

Contributor comment on lines +2487 to +2488:

At Iceberg, we're pretty concerned with making sure that everything is compatible at write time. Instead, we could also change _check_pyarrow_schema_compatible to allow for additional columns in the Parquet file.

It is okay to skip optional columns, but not required ones.
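
As an editorial illustration of that suggestion (not code from this PR), a relaxed check could tolerate extra Parquet columns and missing optional fields while still rejecting a missing required field. The function name and the top-level, name-based matching below are assumptions; the actual pyiceberg check resolves nested fields by ID.

```python
# Hypothetical sketch of the reviewer's suggestion; not part of the PR.
# Extra Parquet columns are ignored and missing *optional* Iceberg fields are
# tolerated, but a missing *required* Iceberg field is still an error.
import pyarrow as pa

from pyiceberg.schema import Schema


def check_pyarrow_schema_compatible_lenient(requested_schema: Schema, provided_schema: pa.Schema) -> None:
    provided_names = set(provided_schema.names)
    missing_required = [
        field.name
        for field in requested_schema.fields
        if field.required and field.name not in provided_names
    ]
    if missing_required:
        raise ValueError(f"Parquet file is missing required fields: {missing_required}")
    # Columns that exist only in the Parquet file are simply not read;
    # absent optional fields will surface as nulls.
```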


         statistics = data_file_statistics_from_parquet_metadata(
             parquet_metadata=parquet_metadata,
             stats_columns=compute_statistics_plan(schema, table_metadata.properties),
             parquet_column_mapping=parquet_path_to_id_mapping(schema),
+            check_schema=check_schema,
         )
+        if partition_deductor is None:
+            partition = statistics.partition(table_metadata.spec(), table_metadata.schema())
+        else:
+            partition = partition_deductor(file_path)

Contributor comment:

While you can add keys to the Record, it is looked up by position, based on the schema that belongs to it (in this case, the schema of the active PartitionSpec).
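
For example (an editor-provided sketch, not part of the PR), for a table partitioned by identity(dt) and identity(region), a deductor has to return the values in exactly that spec order; the Hive-style path layout and field names here are assumptions.

```python
# Hypothetical sketch; the partition field names ("dt", "region") and the
# Hive-style path layout are assumptions. Record values are matched to the
# active PartitionSpec by position, so they must be emitted in spec order.
from pyiceberg.typedef import Record


def partition_from_path(file_path: str) -> Record:
    # e.g. "s3://bucket/db/tbl/data/dt=2024-01-01/region=eu/part-0.parquet"
    values = {}
    for segment in file_path.split("/"):
        if "=" in segment:
            key, _, value = segment.partition("=")
            values[key] = value
    # Positional construction, ordered as (dt, region) in the partition spec.
    return Record(values["dt"], values["region"])
```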


         data_file = DataFile(
             content=DataFileContent.DATA,
             file_path=file_path,
             file_format=FileFormat.PARQUET,
-            partition=statistics.partition(table_metadata.spec(), table_metadata.schema()),
+            partition=partition,
             file_size_in_bytes=len(input_file),
             sort_order_id=None,
             spec_id=table_metadata.default_spec_id,
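
To illustrate what the new check_schema flag changes in the statistics path (an editor-provided sketch, not part of the PR; the file path, schema, and column names are made up), a Parquet column that the Iceberg schema does not contain is now skipped with a warning instead of raising KeyError.

```python
# Hypothetical sketch, not part of the PR: the Parquet file carries an extra
# column ("extra") that the Iceberg schema does not contain. With
# check_schema=True the column lookup raises KeyError; with check_schema=False
# the column is logged and skipped, and stats are still collected for "id".
import pyarrow as pa
import pyarrow.parquet as pq

from pyiceberg.io.pyarrow import (
    compute_statistics_plan,
    data_file_statistics_from_parquet_metadata,
    parquet_path_to_id_mapping,
)
from pyiceberg.schema import Schema
from pyiceberg.types import LongType, NestedField

iceberg_schema = Schema(NestedField(1, "id", LongType(), required=False))

pq.write_table(pa.table({"id": [1, 2], "extra": ["a", "b"]}), "/tmp/with_extra.parquet")
metadata = pq.read_metadata("/tmp/with_extra.parquet")

statistics = data_file_statistics_from_parquet_metadata(
    parquet_metadata=metadata,
    stats_columns=compute_statistics_plan(iceberg_schema, {}),
    parquet_column_mapping=parquet_path_to_id_mapping(iceberg_schema),
    check_schema=False,  # "extra" is only warned about and skipped
)
print(statistics.column_sizes)
```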

pyiceberg/table/__init__.py (6 additions & 7 deletions)

@@ -686,7 +686,7 @@ def delete(
             warnings.warn("Delete operation did not match any records")

     def add_files(
-        self, file_paths: List[str], snapshot_properties: Dict[str, str] = EMPTY_DICT, check_duplicate_files: bool = True
+        self, file_paths: List[str], snapshot_properties: Dict[str, str] = EMPTY_DICT, check_duplicate_files: bool = True, check_schema: bool = True, partition_deductor: Callable[[str], Record] | None = None,
     ) -> None:
         """
         Shorthand API for adding files as data files to the table transaction.
@@ -717,7 +717,7 @@ def add_files(
                 )
         with self.update_snapshot(snapshot_properties=snapshot_properties).fast_append() as update_snapshot:
             data_files = _parquet_files_to_data_files(
-                table_metadata=self.table_metadata, file_paths=file_paths, io=self._table.io
+                table_metadata=self.table_metadata, file_paths=file_paths, io=self._table.io, check_schema=check_schema, partition_deductor=partition_deductor
             )
             for data_file in data_files:
                 update_snapshot.append_data_file(data_file)
@@ -1283,7 +1283,7 @@ def delete(
             tx.delete(delete_filter=delete_filter, case_sensitive=case_sensitive, snapshot_properties=snapshot_properties)

     def add_files(
-        self, file_paths: List[str], snapshot_properties: Dict[str, str] = EMPTY_DICT, check_duplicate_files: bool = True
+        self, file_paths: List[str], snapshot_properties: Dict[str, str] = EMPTY_DICT, check_duplicate_files: bool = True, check_schema: bool = True, partition_deductor: Callable[[str], Record] | None = None,
     ) -> None:
         """
         Shorthand API for adding files as data files to the table.
@@ -1296,8 +1296,7 @@ def add_files(
         """
         with self.transaction() as tx:
             tx.add_files(
-                file_paths=file_paths, snapshot_properties=snapshot_properties, check_duplicate_files=check_duplicate_files
-            )
+                file_paths=file_paths, snapshot_properties=snapshot_properties, check_duplicate_files=check_duplicate_files, check_schema=check_schema, partition_deductor=partition_deductor)

     def update_spec(self, case_sensitive: bool = True) -> UpdateSpec:
         return UpdateSpec(Transaction(self, autocommit=True), case_sensitive=case_sensitive)
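
Putting the new parameters together, a caller might use them roughly like this (an editor sketch, not part of the PR; the catalog name, table identifier, file path, and the identity(dt) partitioning are assumptions).

```python
# Hypothetical usage sketch; assumes a configured catalog "default", a table
# "db.tbl" partitioned by identity(dt), and Hive-style data file paths.
from pyiceberg.catalog import load_catalog
from pyiceberg.typedef import Record

catalog = load_catalog("default")
table = catalog.load_table("db.tbl")


def deduce_partition(file_path: str) -> Record:
    # "s3://bucket/db/tbl/data/dt=2024-01-01/part-0.parquet" -> Record("2024-01-01")
    dt = next(seg.split("=", 1)[1] for seg in file_path.split("/") if seg.startswith("dt="))
    return Record(dt)


table.add_files(
    file_paths=["s3://bucket/db/tbl/data/dt=2024-01-01/part-0.parquet"],
    check_schema=False,  # skip the PyArrow schema compatibility check
    partition_deductor=deduce_partition,  # deduce the partition from the path
)
```

Without a partition_deductor the partition is still derived from the column statistics, so existing callers are unaffected.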

@@ -1883,12 +1882,12 @@ class AddFileTask:
     partition_field_value: Record


-def _parquet_files_to_data_files(table_metadata: TableMetadata, file_paths: List[str], io: FileIO) -> Iterable[DataFile]:
+def _parquet_files_to_data_files(table_metadata: TableMetadata, file_paths: List[str], io: FileIO, check_schema: bool = True, partition_deductor: Callable[[str], Record] | None = None) -> Iterable[DataFile]:
     """Convert a list files into DataFiles.

     Returns:
         An iterable that supplies DataFiles that describe the parquet files.
     """
     from pyiceberg.io.pyarrow import parquet_files_to_data_files

-    yield from parquet_files_to_data_files(io=io, table_metadata=table_metadata, file_paths=iter(file_paths))
+    yield from parquet_files_to_data_files(io=io, table_metadata=table_metadata, file_paths=iter(file_paths), check_schema=check_schema, partition_deductor=partition_deductor)