easyconfigs-it4i/p/PyTorch/PyTorch-1.11.0_skip_failing_ops_tests.patch
# Author: Caspar van Leeuwen
# Company: SURF
# Tests 'test_fn_grad_linalg_det_singular_cpu_complex128' and
# 'test_variant_consistency_jit_contiguous_cpu_float32' fail.
# See https://github.com/pytorch/pytorch/issues/67767 and https://github.com/pytorch/pytorch/issues/67838
# For the first, the devs recommended switching the test off while they revisit the code.
# For the second: the test passes when run interactively with
# python -m unittest test_ops.TestJitCPU.test_variant_consistency_jit_contiguous_cpu_float32 -v
# which shows there is no fundamental problem with the installation;
# something in the environment when the suite is run as 'python run_test.py' makes it fail.
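#
# For context, a minimal sketch of the skip mechanism used below (names from
# torch/testing/_internal/common_methods_invocations.py; this OpInfo entry is
# illustrative, not part of the patch):
#
#     OpInfo('contiguous',
#            ...,
#            skips=(
#                # unittest.skip is applied only to TestJit's
#                # test_variant_consistency_jit, and only on CPU:
#                DecorateInfo(unittest.skip("Skipped!"), 'TestJit',
#                             'test_variant_consistency_jit', device_type='cpu'),
#            )),
#
# A DecorateInfo entry applies its decorator only to tests matching the given
# test class and test name, optionally restricted by device_type and dtypes,
# so every other test for the op keeps running.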
diff -Nru pytorch-1.11.0-rc3.orig/torch/testing/_internal/common_methods_invocations.py pytorch-1.11.0-rc3/torch/testing/_internal/common_methods_invocations.py
--- pytorch-1.11.0-rc3.orig/torch/testing/_internal/common_methods_invocations.py 2022-02-24 18:07:16.430276050 +0100
+++ pytorch-1.11.0-rc3/torch/testing/_internal/common_methods_invocations.py 2022-02-24 19:38:11.610293957 +0100
@@ -8791,7 +8791,10 @@
            supports_fwgrad_bwgrad=True,
            autodiff_fusible_nodes=['aten::contiguous'],
            assert_jit_shape_analysis=True,
-           supports_out=False),
+           supports_out=False,
+           skips=(
+               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cpu'),
+           )),
     OpInfo('sum_to_size',
            op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs),
            dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
@@ -9746,6 +9749,10 @@
                DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),
                DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
                DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
+               # It also breaks on CPU. We'll revisit this once `linalg.lu_solve` is a thing
+               # See https://github.com/pytorch/pytorch/pull/64387 and https://github.com/pytorch/pytorch/issues/67767
+               DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
+                            dtypes=(torch.complex128,)),
            )),
     OpInfo('linalg.cholesky',
            aten_name='linalg_cholesky',