Skip to content

Commit 02baadb — "XFAIL/SKIP Sparse tests" (1 parent: 95e2aa4)

File tree

6 files changed: +21 additions, −30 deletions

tests/compile/function/test_pfunc.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import numpy as np
22
import pytest
3+
import scipy as sp
34

45
import pytensor.tensor as pt
56
from pytensor.compile import UnusedInputError, get_mode
@@ -9,6 +10,7 @@
910
from pytensor.compile.sharedvalue import shared
1011
from pytensor.configdefaults import config
1112
from pytensor.graph.utils import MissingInputError
13+
from pytensor.sparse import SparseTensorType
1214
from pytensor.tensor.math import sum as pt_sum
1315
from pytensor.tensor.type import (
1416
bscalar,
@@ -763,18 +765,15 @@ def test_shared_constructor_copies(self):
763765
# rule #2 reading back from pytensor-managed memory
764766
assert not np.may_share_memory(A.get_value(borrow=False), data_of(A))
765767

768+
@pytest.mark.xfail(reason="Numba does not support Sparse Ops yet")
766769
def test_sparse_input_aliasing_affecting_inplace_operations(self):
767-
sp = pytest.importorskip("scipy", minversion="0.7.0")
768-
769-
from pytensor import sparse
770-
771770
# Note: to trigger this bug with pytensor rev 4586:2bc6fc7f218b,
772771
# you need to make in inputs mutable (so that inplace
773772
# operations are used) and to break the elemwise composition
774773
# with some non-elemwise op (here dot)
775774

776-
x = sparse.SparseTensorType("csc", dtype="float64")()
777-
y = sparse.SparseTensorType("csc", dtype="float64")()
775+
x = SparseTensorType("csc", dtype="float64")()
776+
y = SparseTensorType("csc", dtype="float64")()
778777
f = function([In(x, mutable=True), In(y, mutable=True)], (x + y) + (x + y))
779778
# Test 1. If the same variable is given twice
780779

tests/sparse/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
import pytest
2+
3+
4+
pytest.skip("Numba does not support Sparse Ops yet", allow_module_level=True)

tests/sparse/test_basic.py

Lines changed: 6 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
import pytensor.sparse.math
99
import pytensor.tensor as pt
1010
from pytensor import sparse
11-
from pytensor.compile.function import function
1211
from pytensor.compile.io import In
1312
from pytensor.configdefaults import config
1413
from pytensor.gradient import GradientError
@@ -87,19 +86,6 @@ def as_sparse_format(data, format):
8786
raise NotImplementedError()
8887

8988

90-
def eval_outputs(outputs):
91-
return function([], outputs)()[0]
92-
93-
94-
# scipy 0.17 will return sparse values in all cases while previous
95-
# version sometimes wouldn't. This will make everything dense so that
96-
# we can use assert_allclose.
97-
def as_ndarray(val):
98-
if hasattr(val, "toarray"):
99-
return val.toarray()
100-
return val
101-
102-
10389
def random_lil(shape, dtype, nnz):
10490
rval = scipy_sparse.lil_matrix(shape, dtype=dtype)
10591
huge = 2**30
@@ -355,7 +341,7 @@ def test_transpose_csc(self):
355341
assert ta.type.dtype == "float64", ta.type.dtype
356342
assert ta.type.format == "csr", ta.type.format
357343

358-
vta = eval_outputs([ta])
344+
vta = ta.eval()
359345
assert vta.shape == (3, 5)
360346

361347
def test_transpose_csr(self):
@@ -367,7 +353,7 @@ def test_transpose_csr(self):
367353
assert ta.type.dtype == "float64", ta.type.dtype
368354
assert ta.type.format == "csc", ta.type.format
369355

370-
vta = eval_outputs([ta])
356+
vta = ta.eval()
371357
assert vta.shape == (3, 5)
372358

373359

@@ -544,13 +530,13 @@ def test_basic(self):
544530
test_val = np.random.random((5,)).astype(config.floatX)
545531
a = pt.as_tensor_variable(test_val)
546532
s = csc_from_dense(a)
547-
val = eval_outputs([s])
533+
val = s.eval()
548534
assert str(val.dtype) == config.floatX
549535
assert val.format == "csc"
550536

551537
a = pt.as_tensor_variable(test_val)
552538
s = csr_from_dense(a)
553-
val = eval_outputs([s])
539+
val = s.eval()
554540
assert str(val.dtype) == config.floatX
555541
assert val.format == "csr"
556542

@@ -573,7 +559,7 @@ def test_dense_from_sparse(self):
573559
s = t(scipy_sparse.identity(5))
574560
s = as_sparse_variable(s)
575561
d = dense_from_sparse(s)
576-
val = eval_outputs([d])
562+
val = d.eval()
577563
assert str(val.dtype) == s.dtype
578564
assert np.all(val[0] == [1, 0, 0, 0, 0])
579565

@@ -583,7 +569,7 @@ def test_todense(self):
583569
s = t(scipy_sparse.identity(5))
584570
s = as_sparse_variable(s)
585571
d = s.toarray()
586-
val = eval_outputs([d])
572+
val = d.eval()
587573
assert str(val.dtype) == s.dtype
588574
assert np.all(val[0] == [1, 0, 0, 0, 0])
589575

tests/sparse/test_math.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,6 @@
5454
)
5555
from tests import unittest_tools as utt
5656
from tests.sparse.test_basic import (
57-
as_ndarray,
5857
as_sparse_format,
5958
random_lil,
6059
sparse_random_inputs,
@@ -1020,7 +1019,7 @@ def test_op(self):
10201019
tested = f(*self.a)
10211020
x, y, p = self.a
10221021
expected = p.multiply(np.dot(x, y.T))
1023-
utt.assert_allclose(as_ndarray(expected), tested.toarray())
1022+
utt.assert_allclose(expected.toarray(), tested.toarray())
10241023
assert tested.format == "csr"
10251024
assert tested.dtype == expected.dtype
10261025

@@ -1030,7 +1029,7 @@ def test_negative_stride(self):
10301029
tested = f(*a2)
10311030
x, y, p = a2
10321031
expected = p.multiply(np.dot(x, y.T))
1033-
utt.assert_allclose(as_ndarray(expected), tested.toarray())
1032+
utt.assert_allclose(expected.toarray(), tested.toarray())
10341033
assert tested.format == "csr"
10351034
assert tested.dtype == expected.dtype
10361035

@@ -1098,7 +1097,7 @@ def test_structured_add_s_v(self):
10981097

10991098
out = f(spmat, mat)
11001099
utt.assert_allclose(
1101-
as_ndarray(spones.multiply(spmat + mat)), out.toarray()
1100+
spones.multiply(spmat + mat).toarray(), out.toarray()
11021101
)
11031102

11041103

tests/test_raise_op.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -181,6 +181,7 @@ def test_infer_shape_scalar(self):
181181
)
182182

183183

184+
@pytest.mark.xfail(reason="Numba does not support Sparse Ops yet")
184185
def test_CheckAndRaise_sparse_variable():
185186
check_and_raise = CheckAndRaise(ValueError, "sparse_check")
186187

tests/typed_list/test_basic.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -452,6 +452,7 @@ def test_non_tensor_type(self):
452452

453453
assert f([[x, y], [x, y, y]], [x, y]) == 0
454454

455+
@pytest.mark.xfail(reason="Numba does not support Sparse Ops yet")
455456
def test_sparse(self):
456457
mySymbolicSparseList = TypedListType(
457458
sparse.SparseTensorType("csr", pytensor.config.floatX)
@@ -519,6 +520,7 @@ def test_non_tensor_type(self):
519520

520521
assert f([[x, y], [x, y, y]], [x, y]) == 1
521522

523+
@pytest.mark.xfail(reason="Numba does not support Sparse Ops yet")
522524
def test_sparse(self):
523525
mySymbolicSparseList = TypedListType(
524526
sparse.SparseTensorType("csr", pytensor.config.floatX)

0 commit comments