From 86e24ec16fee94117cf4ee1985ca8e4778769276 Mon Sep 17 00:00:00 2001
From: "Deng, Daisy"
Date: Thu, 6 Nov 2025 14:21:27 +0000
Subject: [PATCH 1/2] use stock pytorch definition of python_ref_db for
 test_ops

---
 test/xpu/skip_list_common.py |  4 ++++
 test/xpu/xpu_test_utils.py   | 21 +++++----------------
 2 files changed, 9 insertions(+), 16 deletions(-)

diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index 670dbee1f..dec34b65e 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -14,6 +14,10 @@
         "test_python_ref_executor__refs_mul_executor_aten_xpu_complex32",
         # https://github.com/intel/torch-xpu-ops/issues/2254
         "histogramdd",
+        "_vdot_",
+        "_dot_",
+        "_flash_attention_",
+        "_efficient_attention_",
     ),
     "test_binary_ufuncs_xpu.py": (
         "test_fmod_remainder_by_zero_integral_xpu_int64",  # zero division is an undefined behavior: different handles on different backends
diff --git a/test/xpu/xpu_test_utils.py b/test/xpu/xpu_test_utils.py
index ac89eab6d..9912c2e63 100644
--- a/test/xpu/xpu_test_utils.py
+++ b/test/xpu/xpu_test_utils.py
@@ -7,7 +7,7 @@
 import unittest
 
 import torch
-from torch import bfloat16, cuda
+from torch import cuda
 from torch.testing._internal import (
     common_cuda,
     common_device_type,
@@ -865,7 +865,6 @@ def __init__(self, patch_test_case=True) -> None:
         )
         self.foreach_reduce_op_db = common_methods_invocations.foreach_reduce_op_db
         self.foreach_other_op_db = common_methods_invocations.foreach_other_op_db
-        self.python_ref_db = common_methods_invocations.python_ref_db
         self.ops_and_refs = common_methods_invocations.ops_and_refs
         self.largeTensorTest = common_device_type.largeTensorTest
         self.TEST_CUDA = common_cuda.TEST_CUDA
@@ -921,19 +920,10 @@ def gen_xpu_wrappers(op_name, wrappers):
 
     def align_supported_dtypes(self, db):
         for opinfo in db:
-            if (
-                opinfo.name not in _xpu_computation_op_list
-                and (
-                    opinfo.torch_opinfo.name not in _xpu_computation_op_list
-                    if db == common_methods_invocations.python_ref_db
-                    else True
-                )
-            ) or opinfo.name in _ops_without_cuda_support:
+            if opinfo.name in _ops_without_cuda_support:
                 opinfo.dtypesIf["xpu"] = opinfo.dtypes
             else:
                 backward_dtypes = set(opinfo.backward_dtypesIfCUDA)
-                if bfloat16 in opinfo.dtypesIf["xpu"]:
-                    backward_dtypes.add(bfloat16)
                 opinfo.backward_dtypes = tuple(backward_dtypes)
 
             if opinfo.name in _ops_dtype_different_cuda_support:
@@ -1039,13 +1029,13 @@ def __init__(self, *args):
             self.align_db_decorators(db)
             self.filter_fp64_sample_input(db)
         self.align_db_decorators(module_db)
-        common_methods_invocations.python_ref_db = [
+        _python_ref_db = [
             op
-            for op in self.python_ref_db
+            for op in common_methods_invocations.python_ref_db
             if op.torch_opinfo_name in _xpu_computation_op_list
         ]
         common_methods_invocations.ops_and_refs = (
-            common_methods_invocations.op_db + common_methods_invocations.python_ref_db
+            common_methods_invocations.op_db + _python_ref_db
         )
         common_methods_invocations.unary_ufuncs = [
             op
@@ -1128,7 +1118,6 @@ def __exit__(self, exc_type, exc_value, traceback):
             self.instantiate_parametrized_tests_fn
         )
         common_utils.TestCase = self.test_case_cls
-        common_methods_invocations.python_ref_db = self.python_ref_db
         common_methods_invocations.ops_and_refs = self.ops_and_refs
         common_device_type.largeTensorTest = self.largeTensorTest
         common_cuda.TEST_CUDA = self.TEST_CUDA

From 23b80325da1fbb6a9932744619e50b6fd7c46eb2 Mon Sep 17 00:00:00 2001
From: "Deng, Daisy"
Date: Fri, 7 Nov 2025 03:32:14 +0000
Subject: [PATCH 2/2] add _refs.true_divide to _cuda_xfail_xpu_pass list

---
 test/xpu/xpu_test_utils.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/xpu/xpu_test_utils.py b/test/xpu/xpu_test_utils.py
index 9912c2e63..a4d57b607 100644
--- a/test/xpu/xpu_test_utils.py
+++ b/test/xpu/xpu_test_utils.py
@@ -354,6 +354,11 @@
         "_refs.div",
         "test_python_ref_torch_fallback",
     ),
+    ("_refs.true_div", "test_python_ref"),
+    (
+        "_refs.true_div",
+        "test_python_ref_torch_fallback",
+    ),
     ("argsort", "test_non_standard_bool_values"),
     ("sort", "test_non_standard_bool_values"),
 ]
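
Note: the lines below are a minimal standalone sketch of the intent of PATCH 1, not the verbatim contents of test/xpu/xpu_test_utils.py. The stock common_methods_invocations.python_ref_db is left untouched; only a locally filtered copy is used when rebuilding ops_and_refs. The _xpu_computation_op_list here is a placeholder standing in for the allow-list already defined in xpu_test_utils.py.

# Sketch (assumptions: placeholder allow-list; real list lives in xpu_test_utils.py)
from torch.testing._internal import common_methods_invocations

# Placeholder allow-list; the real _xpu_computation_op_list is much longer.
_xpu_computation_op_list = ["add", "mul", "div"]

# Keep only the Python refs whose underlying OpInfo name is in the allow-list,
# without monkey-patching the global python_ref_db.
_python_ref_db = [
    op
    for op in common_methods_invocations.python_ref_db
    if op.torch_opinfo_name in _xpu_computation_op_list
]

# ops_and_refs is rebuilt from the full op_db plus the filtered refs.
common_methods_invocations.ops_and_refs = (
    common_methods_invocations.op_db + _python_ref_db
)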