diff --git a/python_files/tests/pytestadapter/.data/test_rerunfailures_plugin.py b/python_files/tests/pytestadapter/.data/test_rerunfailures_plugin.py
new file mode 100644
index 000000000000..7f152cbb68c4
--- /dev/null
+++ b/python_files/tests/pytestadapter/.data/test_rerunfailures_plugin.py
@@ -0,0 +1,21 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import pytest
+import os
+
+@pytest.mark.flaky(reruns=2)
+def test_flaky():  # test_marker--test_flaky
+    # count is not set for first run, but set to 2 for the second run
+    count = os.environ.get("COUNT")
+    os.environ["COUNT"] = "2"
+    # this will fail on the first run, but pass on the second (1 passed, 1 rerun)
+    assert count == "2"
+
+def test_flaky_no_marker():
+    # this test is flaky and will be run via the command line argument
+    # count is not set for first run, but set to 2 for the second run
+    count = os.environ.get("COUNT")
+    os.environ["COUNT"] = "2"
+    # this will fail on the first run, but pass on the second (1 passed, 1 rerun)
+    assert count == "2"
diff --git a/python_files/tests/pytestadapter/expected_execution_test_output.py b/python_files/tests/pytestadapter/expected_execution_test_output.py
index 8f378074343d..0d8e9af943f3 100644
--- a/python_files/tests/pytestadapter/expected_execution_test_output.py
+++ b/python_files/tests/pytestadapter/expected_execution_test_output.py
@@ -734,3 +734,33 @@
         "subtest": None,
     },
 }
+
+test_rerunfailures_plugin_path = TEST_DATA_PATH / "test_rerunfailures_plugin.py"
+rerunfailures_plugin_expected_execution_output = {
+    get_absolute_test_id(
+        "test_rerunfailures_plugin.py::test_flaky", test_rerunfailures_plugin_path
+    ): {
+        "test": get_absolute_test_id(
+            "test_rerunfailures_plugin.py::test_flaky", test_rerunfailures_plugin_path
+        ),
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    }
+}
+
+test_rerunfailures_plugin_path = TEST_DATA_PATH / "test_rerunfailures_plugin.py"
+rerunfailures_with_arg_expected_execution_output = {
+    get_absolute_test_id(
+        "test_rerunfailures_plugin.py::test_flaky_no_marker", test_rerunfailures_plugin_path
+    ): {
+        "test": get_absolute_test_id(
+            "test_rerunfailures_plugin.py::test_flaky_no_marker", test_rerunfailures_plugin_path
+        ),
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    }
+}
diff --git a/python_files/tests/pytestadapter/helpers.py b/python_files/tests/pytestadapter/helpers.py
index 7a75e6248844..d04646471e5c 100644
--- a/python_files/tests/pytestadapter/helpers.py
+++ b/python_files/tests/pytestadapter/helpers.py
@@ -331,6 +331,7 @@ def runner_with_cwd_env(
         os.mkfifo(pipe_name)
 
     #################
+    print("beginning run request")
     completed = threading.Event()
 
     result = []  # result is a string array to store the data during threading
diff --git a/python_files/tests/pytestadapter/test_discovery.py b/python_files/tests/pytestadapter/test_discovery.py
index 276753149410..22581a98fa92 100644
--- a/python_files/tests/pytestadapter/test_discovery.py
+++ b/python_files/tests/pytestadapter/test_discovery.py
@@ -251,6 +251,7 @@ def test_pytest_root_dir():
 
     Discovery should succeed and testids should be relative to workspace root.
     """
+    print("running test_pytest_root_dir")
     rd = f"--rootdir={helpers.TEST_DATA_PATH / 'root' / 'tests'}"
     actual = helpers.runner_with_cwd(
         [
@@ -279,6 +280,7 @@ def test_pytest_config_file():
 
     Discovery should succeed and testids should be relative to workspace root.
""" + print("running test_pytest_config_file") actual = helpers.runner_with_cwd( [ "--collect-only", @@ -301,6 +303,7 @@ def test_pytest_config_file(): ), f"Tests tree does not match expected value. \n Expected: {json.dumps(expected_discovery_test_output.root_with_config_expected_output, indent=4)}. \n Actual: {json.dumps(actual_item.get('tests'), indent=4)}" +@pytest.mark.timeout(60) def test_config_sub_folder(): """Here the session node will be a subfolder of the workspace root and the test are in another subfolder. @@ -308,6 +311,7 @@ def test_config_sub_folder(): session node is correctly updated to the common path. """ folder_path = helpers.TEST_DATA_PATH / "config_sub_folder" + print("running test_config_sub_folder") actual = helpers.runner_with_cwd( [ "--collect-only", diff --git a/python_files/tests/pytestadapter/test_execution.py b/python_files/tests/pytestadapter/test_execution.py index 245b13cf5d46..746a86dca9bd 100644 --- a/python_files/tests/pytestadapter/test_execution.py +++ b/python_files/tests/pytestadapter/test_execution.py @@ -65,6 +65,23 @@ def test_rootdir_specified(): assert actual_result_dict == expected_const +def test_rerunfailure_with_arg(): + """Test pytest execution when a --rootdir is specified.""" + args = ["--reruns=2", "test_rerunfailures_plugin.py::test_flaky_no_marker"] + actual = runner(args) + expected_const = expected_execution_test_output.rerunfailures_with_arg_expected_execution_output + assert actual + actual_list: List[Dict[str, Dict[str, Any]]] = actual + assert len(actual_list) == len(expected_const) + actual_result_dict = {} + if actual_list is not None: + for actual_item in actual_list: + assert all(item in actual_item for item in ("status", "cwd", "result")) + assert actual_item.get("status") == "success" + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const + + @pytest.mark.parametrize( ("test_ids", "expected_const"), [ @@ -194,6 +211,13 @@ def test_rootdir_specified(): expected_execution_test_output.nested_describe_expected_execution_output, id="nested_describe_plugin", ), + pytest.param( + [ + "test_rerunfailures_plugin.py::test_flaky", + ], + expected_execution_test_output.rerunfailures_plugin_expected_execution_output, + id="test_rerunfailures_plugin", + ), ], ) def test_pytest_execution(test_ids, expected_const): diff --git a/python_files/vscode_pytest/__init__.py b/python_files/vscode_pytest/__init__.py index 59a1b75e9688..6d7cbe2236d0 100644 --- a/python_files/vscode_pytest/__init__.py +++ b/python_files/vscode_pytest/__init__.py @@ -68,6 +68,7 @@ def __init__(self, message): collected_tests_so_far = [] TEST_RUN_PIPE = os.getenv("TEST_RUN_PIPE") SYMLINK_PATH = None +FLAKY_MAX_RUNS = None def pytest_load_initial_conftests(early_config, parser, args): # noqa: ARG001 @@ -92,6 +93,11 @@ def pytest_load_initial_conftests(early_config, parser, args): # noqa: ARG001 global IS_DISCOVERY IS_DISCOVERY = True + # set the reruns value, -1 if not set + global FLAKY_MAX_RUNS + FLAKY_MAX_RUNS = get_reruns_value(args) + print("Plugin info[vscode-pytest]: global FLAKY_MAX_RUNS set to: ", FLAKY_MAX_RUNS) + # check if --rootdir is in the args for arg in args: if "--rootdir=" in arg: @@ -120,6 +126,29 @@ def pytest_load_initial_conftests(early_config, parser, args): # noqa: ARG001 SYMLINK_PATH = rootdir +def get_reruns_value(args): + """ + Extracts the value of the --reruns argument from a list of command-line arguments. + + Args: + args (list of str): A list of command-line arguments. 
+
+    Returns:
+        int: The integer value of the --reruns argument if found, otherwise -1.
+    """
+    for arg in args:
+        if arg.startswith("--reruns"):
+            if "=" in arg:
+                # Extract the value from --reruns=<value>
+                return int(arg.split("=")[1])
+            else:
+                # Get the value from the next argument
+                index = args.index(arg)
+                if index + 1 < len(args):
+                    return int(args[index + 1])
+    return -1
+
+
 def pytest_internalerror(excrepr, excinfo):  # noqa: ARG001
     """A pytest hook that is called when an internal error occurs.
 
@@ -150,11 +179,38 @@ def pytest_exception_interact(node, call, report):
         else:
             ERRORS.append(report.longreprtext + "\n Check Python Test Logs for more details.")
     else:
+        node_id = get_absolute_test_id(node.nodeid, get_node_path(node))
+        # Check if pytest-rerunfailures is enabled:
+        # check which run this is,
+        # check the number of reruns allowed,
+        # and if the max number of reruns is reached, send the error message; otherwise do not send it
+
+        try:
+            exec_count = node.execution_count
+            # global is set if arg is present during pytest_load_initial_conftests
+            # if not present, then -1
+            flaky_max_runs = FLAKY_MAX_RUNS
+
+            # check for rerunfailures marker
+            for m in node.own_markers:
+                if m.name == "flaky":
+                    flaky_max_runs = m.kwargs.get("reruns", 0)
+                    break
+
+            # flaky_max_runs != -1 means test is flaky
+            if flaky_max_runs != -1 and exec_count <= flaky_max_runs:
+                print("flaky test rerun: ", exec_count)
+                return
+            elif flaky_max_runs != -1 and exec_count > flaky_max_runs:
+                print("Plugin info[vscode-pytest]: max reruns reached.")
+        except AttributeError:
+            pass
+
         # If during execution, send this data that the given node failed.
         report_value = "error"
         if call.excinfo.typename == "AssertionError":
             report_value = "failure"
-        node_id = get_absolute_test_id(node.nodeid, get_node_path(node))
+        # Only add test to collected_tests_so_far if it is not a flaky test that will re-run
         if node_id not in collected_tests_so_far:
             collected_tests_so_far.append(node_id)
             item_result = create_test_outcome(
@@ -280,7 +336,8 @@ def pytest_report_teststatus(report, config):  # noqa: ARG001
             node_path = cwd
         # Calculate the absolute test id and use this as the ID moving forward.
         absolute_node_id = get_absolute_test_id(report.nodeid, node_path)
-        if absolute_node_id not in collected_tests_so_far:
+        # If the test is not a rerun, add it to the collected_tests_so_far list.
+        if report.outcome != "rerun" and absolute_node_id not in collected_tests_so_far:
             collected_tests_so_far.append(absolute_node_id)
             item_result = create_test_outcome(
                 absolute_node_id,
@@ -654,6 +711,8 @@ def build_nested_folders(
     counter = 0
     max_iter = 100
     while iterator_path != session_node_path:
+        print("iterator_path: ", iterator_path)
+        print("session_node_path: ", session_node_path)
         curr_folder_name = iterator_path.name
         try:
             curr_folder_node: TestNode = created_files_folders_dict[os.fspath(iterator_path)]
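
A minimal sanity-check sketch (not part of the diff) for the argument parsing that get_reruns_value introduces above. It assumes vscode_pytest is importable, as it is from the adapter's own test suite; the file name in the argument lists is only illustrative.

    from vscode_pytest import get_reruns_value

    # "--reruns=N" form: the value follows the equals sign
    assert get_reruns_value(["--reruns=2", "test_rerunfailures_plugin.py"]) == 2
    # "--reruns N" form: the value is the next argument
    assert get_reruns_value(["--reruns", "3", "test_rerunfailures_plugin.py"]) == 3
    # flag absent: -1 signals that reruns were not requested on the command line
    assert get_reruns_value(["test_rerunfailures_plugin.py"]) == -1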