|
| 1 | +from __future__ import annotations |
| 2 | + |
| 3 | +import re |
| 4 | +from collections import defaultdict |
| 5 | +from typing import Iterator |
| 6 | + |
| 7 | +from mypy.test.data import DataDrivenTestCase, DataFileCollector, DataFileFix, parse_test_data |
| 8 | + |
| 9 | + |
def update_testcase_output(
    testcase: DataDrivenTestCase, actual: list[str], *, incremental_step: int
) -> None:
    """Queue edits that rewrite *testcase*'s expected output to match *actual*.

    The fixes are enqueued on the testcase's parent collector, which applies
    them to the underlying data file later.
    """
    parent = testcase.parent
    assert isinstance(parent, DataFileCollector)
    fixes = list(_iter_fixes(testcase, actual, incremental_step=incremental_step))
    for fix in fixes:
        parent.enqueue_fix(fix)
| 17 | + |
| 18 | + |
def _iter_fixes(
    testcase: DataDrivenTestCase, actual: list[str], *, incremental_step: int
) -> Iterator[DataFileFix]:
    """Yield DataFileFix edits that bring the testcase's expected output in line
    with *actual* (a list of ``file:line: severity: message`` strings).

    Two strategies, depending on how the test case states its expectations:
    - If the case has ``[out]``/``[outN]`` sections, the section matching
      *incremental_step* is replaced wholesale with *actual*.
    - Otherwise the inline ``# E:``/``# N:``/``# W:`` assertion comments inside
      the ``[case]`` and ``[file ...]`` sections are stripped and regenerated.
    """
    # Hoisted out of the loops: these patterns are applied once per line.
    report_re = re.compile(
        r"^(?P<filename>[^:]+):(?P<lineno>\d+): (?P<severity>error|note|warning): (?P<msg>.+)$"
    )
    assertion_comment_re = re.compile(r"(?P<indent>\s+)(?P<comment># [EWN]: .+)$")
    out_section_re = re.compile(r"^out\d*$")

    # Group the reported (severity, message) pairs by (file, line) so the
    # per-source-line rewrite below can look them up directly.
    reports_by_line: dict[tuple[str, int], list[tuple[str, str]]] = defaultdict(list)
    for error_line in actual:
        comment_match = report_re.match(error_line)
        if comment_match:
            filename = comment_match.group("filename")
            lineno = int(comment_match.group("lineno"))
            severity = comment_match.group("severity")
            msg = comment_match.group("msg")
            reports_by_line[filename, lineno].append((severity, msg))

    test_items = parse_test_data(testcase.data, testcase.name)

    # If we have [out] and/or [outN], we update just those sections.
    if any(out_section_re.match(test_item.id) for test_item in test_items):
        for test_item in test_items:
            # Step 1 uses plain [out]; later steps use the matching [outN].
            if (incremental_step < 2 and test_item.id == "out") or (
                incremental_step >= 2 and test_item.id == f"out{incremental_step}"
            ):
                yield DataFileFix(
                    lineno=testcase.line + test_item.line - 1,
                    end_lineno=testcase.line + test_item.end_line - 1,
                    lines=actual + [""] * test_item.trimmed_newlines,
                )

        return

    # Update assertion comments within the sections
    for test_item in test_items:
        if test_item.id == "case":
            source_lines = test_item.data
            file_path = "main"
        elif test_item.id == "file":
            source_lines = test_item.data
            file_path = f"tmp/{test_item.arg}"
        else:
            continue  # other sections we don't touch

        fix_lines = []
        for lineno, source_line in enumerate(source_lines, start=1):
            reports = reports_by_line.get((file_path, lineno))
            comment_match = assertion_comment_re.search(source_line)
            if comment_match:
                source_line = source_line[: comment_match.start("indent")]  # strip old comment
            if reports:
                # Reuse the original comment's indent if there was one.
                indent = comment_match.group("indent") if comment_match else "  "
                # multiline comments are on the first line and then on subsequent lines empty lines
                # with a continuation backslash
                for j, (severity, msg) in enumerate(reports):
                    out_l = source_line if j == 0 else " " * len(source_line)
                    is_last = j == len(reports) - 1
                    severity_char = severity[0].upper()
                    continuation = "" if is_last else " \\"
                    fix_lines.append(f"{out_l}{indent}# {severity_char}: {msg}{continuation}")
            else:
                fix_lines.append(source_line)

        yield DataFileFix(
            lineno=testcase.line + test_item.line - 1,
            end_lineno=testcase.line + test_item.end_line - 1,
            lines=fix_lines + [""] * test_item.trimmed_newlines,
        )
0 commit comments