From 65c859e60bb56326a22722dc92bea63d30735ace Mon Sep 17 00:00:00 2001
From: Elliott Sales de Andrade <quantum.analyst@gmail.com>
Date: Sat, 19 Aug 2023 16:49:33 -0400
Subject: [PATCH 4/6] Fix test_pandas_timestamp_overflow_pyarrow condition

The new behavior in pyarrow 13 only occurs when pandas 2.0 or later is also installed.
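
A rough sketch of the old and new version gates, for illustration only; here
pyarrow_version and PANDAS_GE_200 are reconstructed locally as stand-ins for
the helpers the test actually imports from dask:

    from packaging.version import parse as parse_version
    import pandas as pd
    import pyarrow as pa

    # Stand-ins for the symbols the test imports from dask; rebuilt here
    # from the installed packages purely for illustration.
    pyarrow_version = parse_version(pa.__version__)
    PANDAS_GE_200 = parse_version(pd.__version__) >= parse_version("2.0")

    # Old condition: any pyarrow >= 13.0.0.dev was expected not to raise.
    old_no_raise = pyarrow_version >= parse_version("13.0.0.dev")

    # New condition: the overflow is only tolerated when pyarrow 13+ is
    # paired with pandas 2.x; all other combinations still raise
    # pa.lib.ArrowInvalid ("out of bounds").
    new_no_raise = pyarrow_version.major >= 13 and PANDAS_GE_200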

Signed-off-by: Elliott Sales de Andrade <quantum.analyst@gmail.com>
---
 dask/dataframe/io/tests/test_parquet.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/dask/dataframe/io/tests/test_parquet.py b/dask/dataframe/io/tests/test_parquet.py
index 518145cc..38ac7c40 100644
--- a/dask/dataframe/io/tests/test_parquet.py
+++ b/dask/dataframe/io/tests/test_parquet.py
@@ -3381,13 +3381,13 @@ def test_pandas_timestamp_overflow_pyarrow(tmpdir):
         table, f"{tmpdir}/file.parquet", use_deprecated_int96_timestamps=False
     )
 
-    if pyarrow_version < parse_version("13.0.0.dev"):
+    if pyarrow_version.major >= 13 and PANDAS_GE_200:
+        dd.read_parquet(str(tmpdir), engine="pyarrow").compute()
+    else:
         # This will raise by default due to overflow
         with pytest.raises(pa.lib.ArrowInvalid) as e:
             dd.read_parquet(str(tmpdir), engine="pyarrow").compute()
             assert "out of bounds" in str(e.value)
-    else:
-        dd.read_parquet(str(tmpdir), engine="pyarrow").compute()
 
     from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine as ArrowEngine
 
-- 
2.42.0