Run only listed tests on s390x (pytorch#140265)
Skip tests that are failing

This was previously part of pytorch#125401

Pull Request resolved: pytorch#140265
Approved by: https://github.com/malfet

Co-authored-by: Nikita Shulga <[email protected]>
2 people authored and pytorchmergebot committed Nov 20, 2024
1 parent 701e06b commit a82bab6
Showing 13 changed files with 398 additions and 3 deletions.
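For context, the tests touched below are marked with `xfailIfS390X`, imported from `torch.testing._internal.common_utils`. A minimal sketch of how such a platform-conditional expected-failure decorator could be written (illustrative only; `IS_S390X` and `xfailIf` are assumed names, and the real helper may differ):

    import platform
    import unittest

    # Illustrative assumption: detect the s390x architecture at import time.
    IS_S390X = platform.machine() == "s390x"

    def xfailIf(condition):
        # Return a decorator that marks a test as an expected failure when
        # `condition` is true, and leaves the test untouched otherwise.
        def decorator(fn):
            if condition:
                return unittest.expectedFailure(fn)
            return fn
        return decorator

    # Decorated tests keep running everywhere; on s390x their failures are
    # reported as expected instead of failing the job.
    xfailIfS390X = xfailIf(IS_S390X)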
14 changes: 13 additions & 1 deletion test/ao/sparsity/test_composability.py
@@ -14,7 +14,7 @@
prepare_fx,
prepare_qat_fx,
)
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_utils import TestCase, xfailIfS390X


logging.basicConfig(
@@ -75,6 +75,7 @@ class TestComposability(TestCase):
# This test checks whether performing quantization prepare before sparse prepare
# causes any issues and verifies that the correct observers are inserted and that
# the quantized model works as expected
@xfailIfS390X
def test_q_prep_before_s_prep(self):
(
mod,
@@ -104,6 +105,7 @@ def test_q_prep_before_s_prep(self):
# the post sparse prepare module names (adding parametrizations changes the module class names)
# which would result in those parametrized modules not being quantized. This test verifies that
# the fix for this was successful.
@xfailIfS390X
def test_s_prep_before_q_prep(self):
(
mod,
@@ -135,6 +137,7 @@ def test_s_prep_before_q_prep(self):
# that the problem outlined in test_s_prep_before_q_prep would occur. This test verifies
# both that the fix to the convert flow avoids this issue and that the resulting quantized
# module uses the sparse version of the weight value.
@xfailIfS390X
def test_convert_without_squash_mask(self):
(
mod,
@@ -175,6 +178,7 @@ def test_convert_without_squash_mask(self):
# This tests whether performing sparse prepare before fusion causes any issues. The
# worry was that the link created between the sparsifier and the modules that need to
# be sparsified would be broken.
@xfailIfS390X
def test_s_prep_before_fusion(self):
(
mod,
@@ -204,6 +208,7 @@ def test_s_prep_before_fusion(self):

# This tests whether performing fusion before sparse prepare causes and issues. The
# main worry was that the links to the modules in the sparse config would be broken by fusion.
@xfailIfS390X
def test_fusion_before_s_prep(self):
(
mod,
@@ -258,6 +263,7 @@ def test_fusion_before_s_prep(self):
# The primary worries were that qat_prep wouldn't recognize the parametrized
# modules and that the convert step for qat would remove the parametrizations
# from the modules.
@xfailIfS390X
def test_s_prep_before_qat_prep(self):
(
mod,
@@ -285,6 +291,7 @@ def test_s_prep_before_qat_prep(self):
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

# This tests whether performing qat prepare before sparse prepare causes issues.
@xfailIfS390X
def test_qat_prep_before_s_prep(self):
mod, sparsifier, _ = _get_model_and_sparsifier_and_sparse_config(
tq.get_default_qat_qconfig("fbgemm")
@@ -338,6 +345,7 @@ class TestFxComposability(TestCase):
compose cleanly despite variation in sequencing.
"""

@xfailIfS390X
def test_q_prep_fx_before_s_prep(self):
r"""
This test checks that the ordering of prepare_fx -> sparse prepare -> convert_fx
@@ -403,6 +411,7 @@ def test_q_prep_fx_before_s_prep(self):
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

@xfailIfS390X
def test_q_prep_fx_s_prep_ref_conv(self):
r"""
This checks that the ordering: prepare_fx -> sparse prepare -> convert_to_reference_fx
@@ -470,6 +479,7 @@ def test_q_prep_fx_s_prep_ref_conv(self):
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

@xfailIfS390X
def test_s_prep_before_q_prep_fx(self):
r"""
This test checks that the ordering of sparse prepare -> prepare_fx -> convert_fx
@@ -521,6 +531,7 @@ def test_s_prep_before_q_prep_fx(self):
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

@xfailIfS390X
def test_s_prep_before_qat_prep_fx(self):
r"""
This test checks that the ordering of sparse prepare -> prepare_qat_fx -> convert_fx
@@ -575,6 +586,7 @@ def test_s_prep_before_qat_prep_fx(self):
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

@xfailIfS390X
def test_s_prep_q_prep_fx_ref(self):
r"""
This checks that the ordering: sparse prepare -> prepare_fx -> convert_to_reference_fx
4 changes: 4 additions & 0 deletions test/functorch/test_aotdispatch.py
@@ -70,6 +70,7 @@
skipIfTorchDynamo,
TestCase,
xfail_inherited_tests,
xfailIfS390X,
xfailIfTorchDynamo,
)
from torch.testing._internal.custom_tensor import ConstantExtraMetadataTensor
@@ -6525,6 +6526,7 @@ class TestEagerFusionOpInfo(AOTTestCase):
def test_aot_autograd_exhaustive(self, device, dtype, op):
_test_aot_autograd_helper(self, device, dtype, op)

@xfailIfS390X
@ops(op_db + hop_db, allowed_dtypes=(torch.float,))
@patch("functorch.compile.config.debug_assert", True)
@skipOps(
@@ -6571,11 +6573,13 @@ def test_aot_autograd_symbolic_exhaustive(self, device, dtype, op):


class TestEagerFusionModuleInfo(AOTTestCase):
@xfailIfS390X
@modules(module_db, allowed_dtypes=(torch.float,))
@decorateForModules(unittest.expectedFailure, aot_autograd_module_failures)
def test_aot_autograd_module_exhaustive(self, device, dtype, training, module_info):
_test_aot_autograd_module_helper(self, device, dtype, training, module_info)

@xfailIfS390X
@modules(module_db, allowed_dtypes=(torch.float,))
@decorateForModules(
unittest.expectedFailure,
7 changes: 7 additions & 0 deletions test/functorch/test_ops.py
@@ -56,6 +56,7 @@
TEST_WITH_ROCM,
TestCase,
unMarkDynamoStrictTest,
xfailIfS390X,
)
from torch.testing._internal.opinfo.core import SampleInput
from torch.utils import _pytree as pytree
@@ -1036,6 +1037,12 @@ def fn(inp, *args, **kwargs):
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
decorate("linalg.tensorsolve", decorator=xfailIfS390X),
decorate("nn.functional.max_pool1d", decorator=xfailIfS390X),
decorate("nn.functional.max_unpool2d", decorator=xfailIfS390X),
decorate(
"nn.functional.multilabel_margin_loss", decorator=xfailIfS390X
),
}
),
)
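The `decorate(..., decorator=xfailIfS390X)` entries above come from the functorch test helpers and attach a decorator to the generated test for a single OpInfo entry, rather than to the whole `@ops`-parametrized test. As a hypothetical illustration of that routing idea (a simplified sketch, not the actual helper implementation):

    # Hypothetical sketch: wrap only the per-op tests whose op name has an
    # entry in the decorator table, leaving every other generated test as-is.
    def route_decorators(per_op_tests, op_decorators):
        routed = {}
        for name, test_fn in per_op_tests.items():
            decorator = op_decorators.get(name)
            routed[name] = decorator(test_fn) if decorator else test_fn
        return routed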
8 changes: 7 additions & 1 deletion test/inductor/test_compiled_autograd.py
@@ -24,7 +24,11 @@
from torch._dynamo.utils import counters
from torch._inductor import config as inductor_config
from torch._inductor.test_case import run_tests, TestCase
from torch.testing._internal.common_utils import scoped_load_inline, skipIfWindows
from torch.testing._internal.common_utils import (
scoped_load_inline,
skipIfWindows,
xfailIfS390X,
)
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_CUDA, HAS_GPU
from torch.testing._internal.logging_utils import logs_to_string

@@ -2753,6 +2757,7 @@ def test_logs(self):
not in logs.getvalue()
)

@xfailIfS390X
def test_verbose_logs_graph(self):
def fn():
model = torch.nn.Sequential(
@@ -2965,6 +2970,7 @@ def fn(x, obj):
)

@skipIfWindows(msg="AssertionError: Scalars are not equal!")
@xfailIfS390X
def test_verbose_logs_cpp(self):
torch._logging.set_logs(compiled_autograd_verbose=True)

3 changes: 3 additions & 0 deletions test/inductor/test_cpu_repro.py
@@ -36,6 +36,7 @@
skipIfRocm,
slowTest,
TEST_MKL,
xfailIfS390X,
)
from torch.utils._python_dispatch import TorchDispatchMode

@@ -3036,6 +3037,7 @@ def fn(a, b):
kernel_profile_events.append(e.name)
assert len(kernel_profile_events) > 0

@xfailIfS390X
@requires_vectorization
def test_channel_shuffle_cl_output(self):
"""code and shape extracted from shufflenet_v2_x1_0"""
@@ -3817,6 +3819,7 @@ def forward(self, idx, x):
self.assertTrue("cvt_lowp_fp_to_fp32" not in code)
self.assertTrue("cvt_fp32_to_lowp_fp" not in code)

@xfailIfS390X
def test_concat_inner_vec(self):
def fn(x, y):
return F.relu(torch.cat([x, y], dim=1))
3 changes: 2 additions & 1 deletion test/inductor/test_extension_backend.py
@@ -32,7 +32,7 @@
get_wrapper_codegen_for_device,
register_backend_for_device,
)
from torch.testing._internal.common_utils import IS_FBCODE, IS_MACOS
from torch.testing._internal.common_utils import IS_FBCODE, IS_MACOS, xfailIfS390X


try:
@@ -50,6 +50,7 @@
TestCase = test_torchinductor.TestCase


@xfailIfS390X
class BaseExtensionBackendTests(TestCase):
module = None

5 changes: 5 additions & 0 deletions test/inductor/test_torchinductor.py
@@ -91,6 +91,7 @@
subtest,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
xfailIfS390X,
)
from torch.utils import _pytree as pytree
from torch.utils._python_dispatch import TorchDispatchMode
@@ -1901,6 +1902,7 @@ def fn(a):

@skip_if_gpu_halide
@skipCPUIf(IS_MACOS, "fails on macos")
@xfailIfS390X
def test_multilayer_var(self):
def fn(a):
return torch.var(a)
@@ -1920,6 +1922,7 @@ def fn(a):

@skipCPUIf(IS_MACOS, "fails on macos")
@skip_if_halide # accuracy 4.7% off
@xfailIfS390X
def test_multilayer_var_lowp(self):
def fn(a):
return torch.var(a)
@@ -9124,6 +9127,7 @@ def forward(
"TODO: debug this with asan",
)
@skip_if_gpu_halide
@xfailIfS390X
def test_tmp_not_defined_issue2(self):
def forward(arg38_1, arg81_1, getitem_17, new_zeros_default_4):
div_tensor_7 = torch.ops.aten.div.Tensor(getitem_17, arg81_1)
@@ -10312,6 +10316,7 @@ def func(arg0_1):
# Calling div only torch.SymInt arguments is not yet supported.
# To support this behavior, we need to allow const-propping tensors that store symint data.
# For now, dynamo will explicitly graph break when it encounters user code with this behavior.
@xfailIfS390X
@expectedFailureCodegenDynamic
@skip_if_gpu_halide # accuracy error
def test_AllenaiLongformerBase_repro(self):