feat: validate snapshot write compatibility #1772
@@ -28,7 +28,7 @@
 from pytest_mock.plugin import MockerFixture

 from pyiceberg.catalog import Catalog
-from pyiceberg.exceptions import NoSuchTableError
+from pyiceberg.exceptions import CommitFailedException, NoSuchTableError
 from pyiceberg.io import FileIO
 from pyiceberg.io.pyarrow import UnsupportedPyArrowTypeException, _pyarrow_schema_ensure_large_types
 from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionField, PartitionSpec
@@ -850,3 +850,70 @@ def test_add_files_that_referenced_by_current_snapshot_with_check_duplicate_file
     with pytest.raises(ValueError) as exc_info:
         tbl.add_files(file_paths=[existing_files_in_table], check_duplicate_files=True)
     assert f"Cannot add files that are already referenced by table, files: {existing_files_in_table}" in str(exc_info.value)
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("format_version", [1, 2])
+def test_conflict_delete_delete(
+    spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int
+) -> None:
+    identifier = "default.test_conflict"
+    tbl1 = _create_table(session_catalog, identifier, format_version, schema=arrow_table_with_null.schema)
+    tbl1.append(arrow_table_with_null)
+    tbl2 = session_catalog.load_table(identifier)
+
+    tbl1.delete("string == 'z'")
+
+    with pytest.raises(
+        CommitFailedException, match="Operation .* is not allowed when performing .*. Check for overlaps or conflicts."
+    ):
+        # tbl2 isn't aware of the commit by tbl1
+        tbl2.delete("string == 'z'")
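A note for readers of the diff: the expected recovery path in application code would be to refresh the stale table handle and retry. A minimal sketch using only the public pyiceberg API (not part of this PR):

```python
from pyiceberg.exceptions import CommitFailedException

try:
    tbl2.delete("string == 'z'")
except CommitFailedException:
    # Pick up the snapshot committed by tbl1, then retry the delete
    # against the refreshed metadata.
    tbl2.refresh()
    tbl2.delete("string == 'z'")
```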
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("format_version", [1, 2])
+def test_conflict_delete_append(
+    spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int
+) -> None:
+    identifier = "default.test_conflict"
+    tbl1 = _create_table(session_catalog, identifier, format_version, schema=arrow_table_with_null.schema)
+    tbl1.append(arrow_table_with_null)
+    tbl2 = session_catalog.load_table(identifier)
+
+    # This is allowed
+    tbl1.delete("string == 'z'")
+    tbl2.append(arrow_table_with_null)
Review comment: We should verify the content of the table here.
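One possible shape for that assertion, as a sketch: it assumes arrow_table_with_null contains exactly one row matching string == 'z' (as in the shared fixture) and relies only on the public scan API:

```python
# After tbl1's delete and tbl2's append, the table should hold two copies
# of the fixture minus the single deleted row (assumption: exactly one
# row has string == 'z').
tbl2.refresh()
result = tbl2.scan().to_arrow()
assert len(result) == 2 * len(arrow_table_with_null) - 1
```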
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("format_version", [1, 2])
+def test_conflict_append_delete(
+    spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int
+) -> None:
+    identifier = "default.test_conflict"
+    tbl1 = _create_table(session_catalog, identifier, format_version, schema=arrow_table_with_null.schema)
+    tbl1.append(arrow_table_with_null)
+    tbl2 = session_catalog.load_table(identifier)
+
+    tbl1.append(arrow_table_with_null)
+
+    with pytest.raises(
+        CommitFailedException, match="Operation .* is not allowed when performing .*. Check for overlaps or conflicts."
+    ):
+        # tbl2 isn't aware of the commit by tbl1
+        tbl2.delete("string == 'z'")
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("format_version", [1, 2])
+def test_conflict_append_append(
+    spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int
+) -> None:
+    identifier = "default.test_conflict"
+    tbl1 = _create_table(session_catalog, identifier, format_version, schema=arrow_table_with_null.schema)
+    tbl1.append(arrow_table_with_null)
+    tbl2 = session_catalog.load_table(identifier)
+
+    tbl1.append(arrow_table_with_null)
+    tbl2.append(arrow_table_with_null)
Review comment: Could we introduce an assertion here to verify that the content of the table is as we'd expect? (with 3 * arrow_table_with_null data)
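A minimal sketch of the requested assertion, using only the public scan API:

```python
# Three appends of the same fixture should leave exactly three copies.
tbl2.refresh()
result = tbl2.scan().to_arrow()
assert len(result) == 3 * len(arrow_table_with_null)
```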
Review comment: I think the spec may need a re-review, because I think it's inaccurate to say that we only need to verify that the files we are trying to delete are still available when we are executing a REPLACE or DELETE operation. In Spark, we also validate whether there have been conflicting appends when we use the SERIALIZABLE isolation level:

https://github.com/apache/iceberg/blob/9fc49e187069c7ec2493ac0abf20f73175b3df89/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkWrite.java#L368-L374

I think it would be helpful to introduce all three isolation levels (NONE, SERIALIZABLE, and SNAPSHOT) and verify whether conflicting appends or deletes have been introduced in the underlying partitions, to be aligned with the implementation in Spark.
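For illustration, here is a sketch of how an isolation-level switch could gate the validation, loosely mirroring the linked SparkWrite logic. All names here (IsolationLevel, validate_write, and the two placeholder helpers) are hypothetical, not existing pyiceberg APIs:

```python
from enum import Enum


class IsolationLevel(Enum):
    NONE = "none"
    SNAPSHOT = "snapshot"
    SERIALIZABLE = "serializable"


def _validate_deleted_data_files(txn, starting_snapshot_id) -> None:
    """Placeholder: fail if files this write deletes or replaces have
    disappeared since the starting snapshot."""


def _validate_added_data_files(txn, starting_snapshot_id) -> None:
    """Placeholder: fail if conflicting appends landed in the affected
    partitions since the starting snapshot."""


def validate_write(txn, starting_snapshot_id, level: IsolationLevel) -> None:
    if level is IsolationLevel.NONE:
        return  # no conflict detection at all
    # Both SNAPSHOT and SERIALIZABLE must verify that the files being
    # deleted or replaced still exist in the current table state.
    _validate_deleted_data_files(txn, starting_snapshot_id)
    if level is IsolationLevel.SERIALIZABLE:
        # SERIALIZABLE additionally rejects concurrent appends to the
        # partitions this write touches.
        _validate_added_data_files(txn, starting_snapshot_id)
```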
Review comment: Thanks @sungwy for jumping in here, and for creating the issues 🙌

Indeed, depending on whether we use snapshot or serializable isolation, we should allow new data (or not). Would you be willing to split the different levels out into a separate PR? It would be nice to get this one in so we can start working independently on the subtasks you created.

I think this one was mostly blocked on #1903.