Commit b012d22 (parent: c4449f0)

All functions needing replacement have been replaced. In practice that means every remaining use of the `tm.ensure_clean` context manager in these two test modules now goes through the pytest `temp_file` and `tmp_path` fixtures instead.
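For context, `temp_file` is defined in the pandas test suite's conftest, not in this diff. A minimal sketch of the behavior these tests assume, built on pytest's built-in `tmp_path` fixture (names and details here are assumptions, not the real pandas definition):

```python
# Hypothetical sketch of the temp_file fixture assumed by these tests;
# the real definition lives in pandas' conftest.py and may differ.
import uuid

import pytest


@pytest.fixture
def temp_file(tmp_path):
    # tmp_path is pytest's built-in per-test directory fixture; returning a
    # unique child path gives each test a clean file location that pytest
    # removes automatically, which is what tm.ensure_clean used to provide.
    return tmp_path / f"tmp-{uuid.uuid4()}"
```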

File tree: 2 files changed, +96 −96 lines

pandas/tests/io/test_common.py (75 additions, 75 deletions)
```diff
@@ -86,12 +86,12 @@ def test_stringify_path_fspath(self):
         result = icom.stringify_path(p)
         assert result == "foo/bar.csv"
 
-    def test_stringify_file_and_path_like(self):
+    def test_stringify_file_and_path_like(self, temp_file):
         # GH 38125: do not stringify file objects that are also path-like
         fsspec = pytest.importorskip("fsspec")
-        with tm.ensure_clean() as path:
-            with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
-                assert fsspec_obj == icom.stringify_path(fsspec_obj)
+        path = temp_file
+        with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
+            assert fsspec_obj == icom.stringify_path(fsspec_obj)
 
     @pytest.mark.parametrize("path_type", [str, CustomFSPath, Path])
     def test_infer_compression_from_path(self, compression_format, path_type):
```
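The contract this hunk pins down: `icom.stringify_path` converts path-like inputs to `str` but must pass file objects through untouched, even when (like fsspec handles) they also expose `__fspath__`. A rough, simplified sketch of that contract (the real implementation lives in `pandas.io.common` and differs in detail):

```python
# Simplified sketch of the stringify_path contract under test; not the
# actual pandas implementation.
import io
from pathlib import Path


def stringify_path_sketch(obj):
    # File-like objects win: return them as-is even if they are path-like.
    if hasattr(obj, "read") or hasattr(obj, "write"):
        return obj
    if hasattr(obj, "__fspath__"):
        return obj.__fspath__()
    return obj


assert stringify_path_sketch(Path("foo/bar.csv")) == "foo/bar.csv"
buf = io.StringIO()
assert stringify_path_sketch(buf) is buf  # file object passes through
```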
```diff
@@ -338,11 +338,11 @@ def test_read_fspath_all(self, reader, module, path, datapath):
             ("to_stata", {"time_stamp": pd.to_datetime("2019-01-01 00:00")}, "os"),
         ],
     )
-    def test_write_fspath_all(self, writer_name, writer_kwargs, module):
+    def test_write_fspath_all(self, writer_name, writer_kwargs, module, tmp_path):
         if writer_name in ["to_latex"]:  # uses Styler implementation
             pytest.importorskip("jinja2")
-        p1 = tm.ensure_clean("string")
-        p2 = tm.ensure_clean("fspath")
+        p1 = tmp_path / "string"
+        p2 = tmp_path / "fspath"
         df = pd.DataFrame({"A": [1, 2]})
 
         with p1 as string, p2 as fspath:
```
```diff
@@ -364,15 +364,15 @@ def test_write_fspath_all(self, writer_name, writer_kwargs, module):
             expected = f_path.read()
         assert result == expected
 
-    def test_write_fspath_hdf5(self):
+    def test_write_fspath_hdf5(self, tmp_path):
         # Same test as write_fspath_all, except HDF5 files aren't
         # necessarily byte-for-byte identical for a given dataframe, so we'll
         # have to read and compare equality
         pytest.importorskip("tables")
 
         df = pd.DataFrame({"A": [1, 2]})
-        p1 = tm.ensure_clean("string")
-        p2 = tm.ensure_clean("fspath")
+        p1 = tmp_path / "string"
+        p2 = tmp_path / "fspath"
 
         with p1 as string, p2 as fspath:
             mypath = CustomFSPath(fspath)
```
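One thing to watch in the two hunks above: `p1` and `p2` are now plain `pathlib.Path` objects, yet the unchanged `with p1 as string, p2 as fspath:` context lines still use them as context managers, a protocol `Path` only supported through Python 3.12 (deprecated since 3.11). A sketch, under that assumption, of the shape a fixture-era version would more likely take:

```python
# Sketch, not from this commit: bind the paths directly instead of
# relying on pathlib.Path's deprecated context-manager protocol;
# cleanup is already handled by tmp_path itself.
from pathlib import Path

import pandas as pd


def write_both(tmp_path: Path) -> None:
    p1 = tmp_path / "string"
    p2 = tmp_path / "fspath"
    df = pd.DataFrame({"A": [1, 2]})
    string, fspath = str(p1), p2  # no `with p1 as string, p2 as fspath:`
    df.to_csv(string)
    df.to_csv(fspath)
```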
```diff
@@ -432,35 +432,35 @@ def test_next(self, mmap_file):
         with pytest.raises(StopIteration, match=r"^$"):
             next(wrapper)
 
-    def test_unknown_engine(self):
-        with tm.ensure_clean() as path:
-            df = pd.DataFrame(
-                1.1 * np.arange(120).reshape((30, 4)),
-                columns=pd.Index(list("ABCD")),
-                index=pd.Index([f"i-{i}" for i in range(30)]),
-            )
-            df.to_csv(path)
-            with pytest.raises(ValueError, match="Unknown engine"):
-                pd.read_csv(path, engine="pyt")
-
-    def test_binary_mode(self):
+    def test_unknown_engine(self, temp_file):
+        path = temp_file
+        df = pd.DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=pd.Index(list("ABCD")),
+            index=pd.Index([f"i-{i}" for i in range(30)]),
+        )
+        df.to_csv(path)
+        with pytest.raises(ValueError, match="Unknown engine"):
+            pd.read_csv(path, engine="pyt")
+
+    def test_binary_mode(self, temp_file):
         """
         'encoding' shouldn't be passed to 'open' in binary mode.
 
         GH 35058
         """
-        with tm.ensure_clean() as path:
-            df = pd.DataFrame(
-                1.1 * np.arange(120).reshape((30, 4)),
-                columns=pd.Index(list("ABCD")),
-                index=pd.Index([f"i-{i}" for i in range(30)]),
-            )
-            df.to_csv(path, mode="w+b")
-            tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
+        path = temp_file
+        df = pd.DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=pd.Index(list("ABCD")),
+            index=pd.Index([f"i-{i}" for i in range(30)]),
+        )
+        df.to_csv(path, mode="w+b")
+        tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
 
     @pytest.mark.parametrize("encoding", ["utf-16", "utf-32"])
     @pytest.mark.parametrize("compression_", ["bz2", "xz"])
-    def test_warning_missing_utf_bom(self, encoding, compression_):
+    def test_warning_missing_utf_bom(self, encoding, compression_, temp_file):
         """
         bz2 and xz do not write the byte order mark (BOM) for utf-16/32.
 
```
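The rule that `test_binary_mode` pins down is plain Python behavior, worth seeing in isolation; a standalone check, nothing pandas-specific:

```python
# Standalone illustration: open() rejects an encoding in binary mode,
# which is why pandas' handle layer must not forward 'encoding' when it
# opens files as bytes (GH 35058). The ValueError is raised before any
# file is created, so the name below is never touched.
try:
    open("unused-name", "wb", encoding="utf-8")
except ValueError as exc:
    print(exc)  # binary mode doesn't take an encoding argument
```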
```diff
@@ -473,17 +473,17 @@ def test_warning_missing_utf_bom(self, encoding, compression_):
             columns=pd.Index(list("ABCD")),
             index=pd.Index([f"i-{i}" for i in range(30)]),
         )
-        with tm.ensure_clean() as path:
-            with tm.assert_produces_warning(UnicodeWarning, match="byte order mark"):
-                df.to_csv(path, compression=compression_, encoding=encoding)
-
-            # reading should fail (otherwise we wouldn't need the warning)
-            msg = (
-                r"UTF-\d+ stream does not start with BOM|"
-                r"'utf-\d+' codec can't decode byte"
-            )
-            with pytest.raises(UnicodeError, match=msg):
-                pd.read_csv(path, compression=compression_, encoding=encoding)
+        path = temp_file
+        with tm.assert_produces_warning(UnicodeWarning, match="byte order mark"):
+            df.to_csv(path, compression=compression_, encoding=encoding)
+
+        # reading should fail (otherwise we wouldn't need the warning)
+        msg = (
+            r"UTF-\d+ stream does not start with BOM|"
+            r"'utf-\d+' codec can't decode byte"
+        )
+        with pytest.raises(UnicodeError, match=msg):
+            pd.read_csv(path, compression=compression_, encoding=encoding)
 
 
 def test_is_fsspec_url():
```
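The failure mode behind `test_warning_missing_utf_bom`, sketched standalone: Python's UTF-16/32 stream decoder needs the byte order mark to pick a byte order, and the bz2/xz write path loses it. A small demonstration of the same `UnicodeError` the test's regex matches:

```python
# Standalone sketch: a UTF-16 stream without its BOM cannot be decoded
# by the codecs stream reader, mirroring the UnicodeError the test expects.
import codecs
import io

payload = "abc".encode("utf-16")      # begins with a 2-byte BOM
no_bom = payload[2:]                  # strip the BOM, as bz2/xz writes do
reader = codecs.getreader("utf-16")(io.BytesIO(no_bom))
try:
    reader.read()
except UnicodeError as exc:
    print(exc)  # UTF-16 stream does not start with BOM
```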
```diff
@@ -514,38 +514,38 @@ def test_is_fsspec_url_chained():
 
 
 @pytest.mark.parametrize("format", ["csv", "json"])
-def test_codecs_encoding(format):
+def test_codecs_encoding(format, temp_file):
     # GH39247
     expected = pd.DataFrame(
         1.1 * np.arange(120).reshape((30, 4)),
         columns=pd.Index(list("ABCD")),
         index=pd.Index([f"i-{i}" for i in range(30)]),
     )
-    with tm.ensure_clean() as path:
-        with open(path, mode="w", encoding="utf-8") as handle:
-            getattr(expected, f"to_{format}")(handle)
-        with open(path, encoding="utf-8") as handle:
-            if format == "csv":
-                df = pd.read_csv(handle, index_col=0)
-            else:
-                df = pd.read_json(handle)
+    path = temp_file
+    with open(path, mode="w", encoding="utf-8") as handle:
+        getattr(expected, f"to_{format}")(handle)
+    with open(path, encoding="utf-8") as handle:
+        if format == "csv":
+            df = pd.read_csv(handle, index_col=0)
+        else:
+            df = pd.read_json(handle)
     tm.assert_frame_equal(expected, df)
 
 
-def test_codecs_get_writer_reader():
+def test_codecs_get_writer_reader(temp_file):
     # GH39247
     expected = pd.DataFrame(
         1.1 * np.arange(120).reshape((30, 4)),
         columns=pd.Index(list("ABCD")),
         index=pd.Index([f"i-{i}" for i in range(30)]),
     )
-    with tm.ensure_clean() as path:
-        with open(path, "wb") as handle:
-            with codecs.getwriter("utf-8")(handle) as encoded:
-                expected.to_csv(encoded)
-        with open(path, "rb") as handle:
-            with codecs.getreader("utf-8")(handle) as encoded:
-                df = pd.read_csv(encoded, index_col=0)
+    path = temp_file
+    with open(path, "wb") as handle:
+        with codecs.getwriter("utf-8")(handle) as encoded:
+            expected.to_csv(encoded)
+    with open(path, "rb") as handle:
+        with codecs.getreader("utf-8")(handle) as encoded:
+            df = pd.read_csv(encoded, index_col=0)
     tm.assert_frame_equal(expected, df)
```
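The `codecs.getwriter`/`codecs.getreader` pattern in the last hunk works on any binary handle, not just files; a minimal self-contained version of the same round trip:

```python
# Minimal round trip through codecs stream wrappers over an in-memory
# buffer; pandas only ever sees the text-mode wrapper objects.
import codecs
import io

buf = io.BytesIO()
writer = codecs.getwriter("utf-8")(buf)
writer.write("héllo")                      # text in, UTF-8 bytes out

reader = codecs.getreader("utf-8")(io.BytesIO(buf.getvalue()))
assert reader.read() == "héllo"            # bytes in, text out
```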

```diff
@@ -572,7 +572,7 @@ def test_explicit_encoding(io_class, mode, msg):
 
 @pytest.mark.parametrize("encoding_errors", ["strict", "replace"])
 @pytest.mark.parametrize("format", ["csv", "json"])
-def test_encoding_errors(encoding_errors, format):
+def test_encoding_errors(encoding_errors, format, temp_file):
     # GH39450
     msg = "'utf-8' codec can't decode byte"
     bad_encoding = b"\xe4"
```
```diff
@@ -591,18 +591,18 @@ def test_encoding_errors(encoding_errors, format):
         + b'"}}'
     )
     reader = partial(pd.read_json, orient="index")
-    with tm.ensure_clean() as path:
-        file = Path(path)
-        file.write_bytes(content)
+    path = temp_file
+    file = Path(path)
+    file.write_bytes(content)
 
-        if encoding_errors != "replace":
-            with pytest.raises(UnicodeDecodeError, match=msg):
-                reader(path, encoding_errors=encoding_errors)
-        else:
-            df = reader(path, encoding_errors=encoding_errors)
-            decoded = bad_encoding.decode(errors=encoding_errors)
-            expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2])
-            tm.assert_frame_equal(df, expected)
+    if encoding_errors != "replace":
+        with pytest.raises(UnicodeDecodeError, match=msg):
+            reader(path, encoding_errors=encoding_errors)
+    else:
+        df = reader(path, encoding_errors=encoding_errors)
+        decoded = bad_encoding.decode(errors=encoding_errors)
+        expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2])
+        tm.assert_frame_equal(df, expected)
 
 
 @pytest.mark.parametrize("encoding_errors", [0, None])
```
```diff
@@ -616,11 +616,11 @@ def test_encoding_errors_badtype(encoding_errors):
         reader(content)
 
 
-def test_bad_encdoing_errors():
+def test_bad_encdoing_errors(temp_file):
     # GH 39777
-    with tm.ensure_clean() as path:
-        with pytest.raises(LookupError, match="unknown error handler name"):
-            icom.get_handle(path, "w", errors="bad")
+    path = temp_file
+    with pytest.raises(LookupError, match="unknown error handler name"):
+        icom.get_handle(path, "w", errors="bad")
 
 
 @pytest.mark.skipif(WASM, reason="limited file system access on WASM")
```

pandas/tests/io/test_feather.py (21 additions, 21 deletions)
```diff
@@ -26,34 +26,34 @@
 
 @pytest.mark.single_cpu
 class TestFeather:
-    def check_error_on_write(self, df, exc, err_msg):
+    def check_error_on_write(self, df, exc, err_msg, temp_file):
         # check that we are raising the exception
         # on writing
 
         with pytest.raises(exc, match=err_msg):
-            with tm.ensure_clean() as path:
-                to_feather(df, path)
+            to_feather(df, temp_file)
 
-    def check_external_error_on_write(self, df):
+    def check_external_error_on_write(self, df, temp_file):
         # check that we are raising the exception
         # on writing
 
         with tm.external_error_raised(Exception):
-            with tm.ensure_clean() as path:
-                to_feather(df, path)
+            to_feather(df, temp_file)
 
-    def check_round_trip(self, df, expected=None, write_kwargs=None, **read_kwargs):
+    def check_round_trip(
+        self, df, temp_file, expected=None, write_kwargs=None, **read_kwargs
+    ):
         if write_kwargs is None:
             write_kwargs = {}
         if expected is None:
             expected = df.copy()
 
-        with tm.ensure_clean() as path:
-            to_feather(df, path, **write_kwargs)
+        path = temp_file
+        to_feather(df, path, **write_kwargs)
 
-            result = read_feather(path, **read_kwargs)
+        result = read_feather(path, **read_kwargs)
 
-            tm.assert_frame_equal(result, expected)
+        tm.assert_frame_equal(result, expected)
 
     def test_error(self):
         msg = "feather only support IO with DataFrames"
```
```diff
@@ -168,7 +168,7 @@ def test_http_path(self, feather_file, httpserver):
         tm.assert_frame_equal(expected, res)
 
     def test_read_feather_dtype_backend(
-        self, string_storage, dtype_backend, using_infer_string
+        self, string_storage, dtype_backend, using_infer_string, temp_file
     ):
         # GH#50765
         df = pd.DataFrame(
```
```diff
@@ -184,10 +184,10 @@ def test_read_feather_dtype_backend(
             }
         )
 
-        with tm.ensure_clean() as path:
-            to_feather(df, path)
-            with pd.option_context("mode.string_storage", string_storage):
-                result = read_feather(path, dtype_backend=dtype_backend)
+        path = temp_file
+        to_feather(df, path)
+        with pd.option_context("mode.string_storage", string_storage):
+            result = read_feather(path, dtype_backend=dtype_backend)
 
         if dtype_backend == "pyarrow":
             pa = pytest.importorskip("pyarrow")
```
```diff
@@ -231,16 +231,16 @@ def test_int_columns_and_index(self):
         df = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index([3, 4, 5], name="test"))
         self.check_round_trip(df)
 
-    def test_invalid_dtype_backend(self):
+    def test_invalid_dtype_backend(self, tmp_path):
         msg = (
             "dtype_backend numpy is invalid, only 'numpy_nullable' and "
             "'pyarrow' are allowed."
         )
         df = pd.DataFrame({"int": list(range(1, 4))})
-        with tm.ensure_clean("tmp.feather") as path:
-            df.to_feather(path)
-            with pytest.raises(ValueError, match=msg):
-                read_feather(path, dtype_backend="numpy")
+        path = tmp_path / "tmp.feather"
+        df.to_feather(path)
+        with pytest.raises(ValueError, match=msg):
+            read_feather(path, dtype_backend="numpy")
 
     def test_string_inference(self, tmp_path, using_infer_string):
         # GH#54431
```
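`tmp_path` in `test_invalid_dtype_backend` is pytest's built-in per-test directory fixture, so joining a filename onto it replaces `tm.ensure_clean("tmp.feather")` one for one; the bare pattern, outside pandas:

```python
# The bare tmp_path pattern: pytest supplies a unique directory per test
# and deletes it afterwards, so no explicit cleanup is needed.
def test_roundtrip(tmp_path):
    path = tmp_path / "tmp.feather"    # pathlib.Path inside the temp dir
    path.write_bytes(b"payload")
    assert path.read_bytes() == b"payload"
```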
