
delete failing tests (#679)
Samantha Andow authored Apr 8, 2022
1 parent f5ce614 · commit 6a52d17
Showing 1 changed file with 0 additions and 24 deletions.
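
For context, the deleted entries below are xfail/skip markers in functorch's test/test_ops.py. A minimal sketch of the tuple-factory helpers these lists feed, patterned on the functorch test suite of this era (the exact signatures here are an assumption, not code taken from this commit):

def xfail(op_name, variant_name='', *, device_type=None, dtypes=None):
    # Expected failure: the test still runs, and an unexpected pass is an error.
    return (op_name, variant_name, device_type, dtypes, True)

def skip(op_name, variant_name='', *, device_type=None, dtypes=None):
    # Hard skip: the test is not run at all (e.g. for flaky CUDA failures).
    return (op_name, variant_name, device_type, dtypes, False)

Each tuple is later consumed by a skipOps-style decorator (see the sketch after the diff) that attaches unittest.expectedFailure or unittest.skip to the matching OpInfo entry.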
test/test_ops.py: 0 additions & 24 deletions
@@ -375,13 +375,6 @@ def wrapped_fn(*args, **kwargs):
 skip('svd_lowrank', ''),  # fails on cuda, runs okay on cpu
 skip('nn.functional.dropout2d', ''),  # fails on cuda, runs okay on cpu
-# See https://github.com/pytorch/pytorch/issues/69034
-# RuntimeError: expected scalar type double but found float
-xfail('minimum'),
-xfail('min', 'binary'),
-xfail('maximum'),
-xfail('max', 'binary'),
 # The following don't have a forward-mode AD formula in PyTorch core
 # (check derivatives.yaml).
 xfail('var_mean'),
@@ -675,14 +668,6 @@ def test_vmapvjp(self, device, dtype, op):
 # https://gist.github.com/zou3519/c42d032c0111c6b65235583d391bf7a3
 xfail('nn.functional.linear'),
-# These are issues that should be fixed in core. See repro in core:
-# https://github.com/pytorch/functorch/pull/232#discussion_r751405155
-# RuntimeError: expected scalar type double but found float
-xfail('minimum'),
-xfail('min', 'binary'),
-xfail('maximum'),
-xfail('max', 'binary'),
 # Apparently these support forward AD, but we get "Trying to use forward AD..."
 # These are cases where OpInfo has supports_forward_ad=True, but disables
 # the test
@@ -770,7 +755,6 @@ def test_vmapjvp(self, device, dtype, op):
 xfail('linalg.inv'),
 xfail('linalg.tensorinv'),
 xfail('linalg.matrix_power'),
-xfail('maximum'),
 xfail('linalg.householder_product'),
 xfail('tensor_split'),
 xfail('quantile'),
@@ -779,13 +763,10 @@ def test_vmapjvp(self, device, dtype, op):
 xfail('linalg.eigvalsh'),
 xfail('fill_'),
 xfail('linalg.cholesky'),
-xfail('max', 'binary'),
 xfail('nn.functional.gaussian_nll_loss'),
-xfail('min', 'binary'),
 xfail('std_mean'),
 xfail('double', 'channels_last'),
 xfail('block_diag'),
-xfail('minimum'),
 xfail('scatter'),
 xfail('matrix_exp'),
 xfail('nanquantile'),
@@ -1148,10 +1129,6 @@ def test_vjpvmap(self, device, dtype, op):
 xfail('lu', ''),
 xfail('lu_solve', ''),
 xfail('lu_unpack', ''),
-xfail('max', 'binary'),
-xfail('maximum', ''),
-xfail('min', 'binary'),
-xfail('minimum', ''),
 xfail('nanmean', ''),
 xfail('nansum', ''),
 xfail('nn.functional.batch_norm', ''),
@@ -1173,7 +1150,6 @@ def test_vjpvmap(self, device, dtype, op):
 xfail('nn.functional.huber_loss', ''),
 xfail('nn.functional.instance_norm', ''),
 xfail('nn.functional.layer_norm', ''),
-xfail('nn.functional.leaky_relu', ''),
 xfail('nn.functional.logsigmoid', ''),
 xfail('nn.functional.mse_loss', ''),
 xfail('nn.functional.nll_loss', ''),
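
Because xfail entries are enforced (an unexpected pass fails CI), deleting an entry asserts that the corresponding test now passes. A hedged sketch of how a skipOps-style decorator might consume the tuples above; the DecorateInfo usage follows torch.testing internals of this era, and names like all_opinfos are assumptions rather than code from this commit:

import unittest

from torch.testing._internal.common_methods_invocations import (
    DecorateInfo,
    op_db as all_opinfos,  # assumed source of OpInfo entries
)

def skipOps(test_case_name, base_test_name, to_skip):
    # Attach a skip or expected-failure decorator to every matching OpInfo.
    for op_name, variant_name, device_type, dtypes, expected_failure in to_skip:
        matching = [o for o in all_opinfos
                    if o.name == op_name and o.variant_test_name == variant_name]
        assert matching, f"Couldn't find OpInfo for ({op_name}, {variant_name})"
        for opinfo in matching:
            decorator = (unittest.expectedFailure if expected_failure
                         else unittest.skip("Skipped!"))
            opinfo.decorators = opinfo.decorators + (
                DecorateInfo(decorator, test_case_name, base_test_name,
                             device_type=device_type, dtypes=dtypes),
            )

    def wrapped(fn):
        # The decorator itself leaves the test class unchanged; all the work
        # happened above, by mutating the OpInfo database.
        return fn
    return wrapped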
