Skip to content

Cells

Solution stripping, marker validation, grading cell injection, and notebook post-processing.

mograder.grading.cells

Notebook cell manipulation: marker validation, solution stripping, grading cell injection, and mark/feedback parsing.

validate_markers(lines, filepath)

Check that all solution and hidden-test markers are properly paired.

Returns a list of error messages (empty if valid).

Source code in src/mograder/grading/cells.py
def validate_markers(lines: list[str], filepath: str) -> list[str]:
    """Check that all solution and hidden-test markers are properly paired.

    Returns a list of error messages (empty if valid).
    """
    errors: list[str] = []
    # Line number where each block kind was opened; None while closed.
    open_at: dict[str, int | None] = {"solution": None, "hidden": None}

    for lineno, raw in enumerate(lines, 1):
        marker = raw.strip()

        if marker == SOLUTION_BEGIN:
            if open_at["solution"] is not None:
                errors.append(
                    f"{filepath}:{lineno}: nested {SOLUTION_BEGIN} "
                    f"(previous opened at line {open_at['solution']})"
                )
            if open_at["hidden"] is not None:
                errors.append(
                    f"{filepath}:{lineno}: {SOLUTION_BEGIN} inside hidden tests block "
                    f"(opened at line {open_at['hidden']})"
                )
            open_at["solution"] = lineno

        elif marker == SOLUTION_END:
            if open_at["solution"] is None:
                errors.append(
                    f"{filepath}:{lineno}: {SOLUTION_END} without matching {SOLUTION_BEGIN}"
                )
            open_at["solution"] = None

        elif marker == HIDDEN_TESTS_BEGIN:
            if open_at["hidden"] is not None:
                errors.append(
                    f"{filepath}:{lineno}: nested {HIDDEN_TESTS_BEGIN} "
                    f"(previous opened at line {open_at['hidden']})"
                )
            if open_at["solution"] is not None:
                errors.append(
                    f"{filepath}:{lineno}: {HIDDEN_TESTS_BEGIN} inside solution block "
                    f"(opened at line {open_at['solution']})"
                )
            open_at["hidden"] = lineno

        elif marker == HIDDEN_TESTS_END:
            if open_at["hidden"] is None:
                errors.append(
                    f"{filepath}:{lineno}: {HIDDEN_TESTS_END} without matching "
                    f"{HIDDEN_TESTS_BEGIN}"
                )
            open_at["hidden"] = None

    # Any block still open at end-of-file is unterminated.
    if open_at["solution"] is not None:
        errors.append(f"{filepath}:{open_at['solution']}: unclosed {SOLUTION_BEGIN}")
    if open_at["hidden"] is not None:
        errors.append(f"{filepath}:{open_at['hidden']}: unclosed {HIDDEN_TESTS_BEGIN}")

    return errors

strip_solutions(lines)

Remove solution blocks from source lines.

Lines between BEGIN SOLUTION / END SOLUTION are replaced with # YOUR CODE HERE and pass at the correct indentation. The pass ensures empty function bodies remain syntactically valid.

When a return statement after END SOLUTION references simple variable names that are only defined inside the removed solution block, sentinel assignments (name = ...) are inserted before the placeholder so the return does not raise `NameError`.

Source code in src/mograder/grading/cells.py
def strip_solutions(lines: list[str]) -> list[str]:
    """Remove solution blocks from source lines.

    Lines between BEGIN SOLUTION / END SOLUTION are replaced with
    ``# YOUR CODE HERE`` and ``pass`` at the correct indentation.
    The ``pass`` ensures empty function bodies remain syntactically valid.

    When a ``return`` statement after END SOLUTION references simple
    variable names that are only defined inside the removed solution block,
    sentinel assignments (``name = ...``) are inserted before the
    placeholder so the return does not raise :class:`NameError`.
    """
    sentinels = _find_sentinel_vars(lines)

    result: list[str] = []
    skipping = False
    indent = ""
    block_no = -1

    for raw in lines:
        marker = raw.strip()

        if marker == SOLUTION_BEGIN:
            skipping = True
            block_no += 1
            # Remember the marker's indentation for the placeholder lines.
            indent = raw[: len(raw) - len(raw.lstrip())]
        elif marker == SOLUTION_END:
            skipping = False
            # Sentinel assignments keep later `return` statements valid.
            result.extend(
                f"{indent}{name} = ...\n" for name in sentinels.get(block_no, [])
            )
            result.append(f"{indent}# YOUR CODE HERE\n")
            result.append(f"{indent}pass\n")
        elif not skipping:
            result.append(raw)

    return result

count_markers(lines)

Count solution blocks.

Source code in src/mograder/grading/cells.py
def count_markers(lines: list[str]) -> int:
    """Count solution blocks (one BEGIN SOLUTION marker per block)."""
    begins = [raw for raw in lines if raw.strip() == SOLUTION_BEGIN]
    return len(begins)

count_hidden_markers(lines)

Count hidden test blocks.

Source code in src/mograder/grading/cells.py
def count_hidden_markers(lines: list[str]) -> int:
    """Count hidden test blocks (one BEGIN HIDDEN TESTS marker per block)."""
    begins = [raw for raw in lines if raw.strip() == HIDDEN_TESTS_BEGIN]
    return len(begins)

strip_hidden_tests(lines)

Remove hidden test blocks from source lines.

Lines between BEGIN HIDDEN TESTS / END HIDDEN TESTS are replaced with a single # HIDDEN TESTS placeholder comment at the correct indentation.

Source code in src/mograder/grading/cells.py
def strip_hidden_tests(lines: list[str]) -> list[str]:
    """Remove hidden test blocks from source lines.

    Lines between BEGIN HIDDEN TESTS / END HIDDEN TESTS are replaced with
    a single ``# HIDDEN TESTS`` placeholder comment at the correct indentation.
    """
    result: list[str] = []
    skipping = False
    indent = ""

    for raw in lines:
        marker = raw.strip()

        if marker == HIDDEN_TESTS_BEGIN:
            skipping = True
            # Remember the marker's indentation for the placeholder comment.
            indent = raw[: len(raw) - len(raw.lstrip())]
        elif marker == HIDDEN_TESTS_END:
            skipping = False
            result.append(f"{indent}# HIDDEN TESTS\n")
        elif not skipping:
            result.append(raw)

    return result

extract_hidden_tests(lines)

Extract hidden test blocks as (indent, lines) tuples.

Each tuple contains the indentation prefix and the list of lines (with their original indentation) from one hidden-test block. Used during autograde to reinject hidden tests into submitted notebooks.

Source code in src/mograder/grading/cells.py
def extract_hidden_tests(lines: list[str]) -> list[tuple[str, list[str]]]:
    """Extract hidden test blocks as ``(indent, lines)`` tuples.

    Each tuple contains the indentation prefix and the list of lines
    (with their original indentation) from one hidden-test block.
    Used during autograde to reinject hidden tests into submitted notebooks.
    """
    blocks: list[tuple[str, list[str]]] = []
    buf: list[str] = []
    grabbing = False
    indent = ""

    for raw in lines:
        marker = raw.strip()

        if marker == HIDDEN_TESTS_BEGIN:
            grabbing = True
            # Indentation of the BEGIN marker identifies the block's prefix.
            indent = raw[: len(raw) - len(raw.lstrip())]
            buf = []
        elif marker == HIDDEN_TESTS_END:
            grabbing = False
            blocks.append((indent, buf))
        elif grabbing:
            buf.append(raw)

    return blocks

convert_markdown_cells(lines)

Convert stripped markdown answer cells to editable mo.md() blocks.

After strip_solutions(), markdown answer cells look like::

response_text = "placeholder text"
# YOUR CODE HERE
pass
mo.md(response_text)

This function converts them to::

mo.md(r"""
placeholder text
""")

so that students see a clean editable markdown cell instead of ugly placeholder code.

Source code in src/mograder/grading/cells.py
def convert_markdown_cells(lines: list[str]) -> list[str]:
    """Convert stripped markdown answer cells to editable mo.md() blocks.

    After ``strip_solutions()``, markdown answer cells look like::

        response_text = "placeholder text"
        # YOUR CODE HERE
        pass
        mo.md(response_text)

    This function converts them to::

        mo.md(r\"\"\"
        placeholder text
        \"\"\")

    so that students see a clean editable markdown cell instead of ugly
    placeholder code.
    """
    # Backreference \2 requires the closing quote to match the opening one;
    # the previous pattern ["\'](.+?)["\'] accepted mismatched pairs like
    # "text'.  Compiled once instead of re-parsed per line.
    assignment_re = re.compile(r'^(\s*)response_text\s*=\s*(["\'])(.+?)\2\s*$')

    output: list[str] = []
    i = 0
    while i < len(lines):
        # Try to match the 4-line pattern.
        if i + 3 < len(lines):
            m = assignment_re.match(lines[i])
            if (
                m
                and lines[i + 1].strip() == "# YOUR CODE HERE"
                and lines[i + 2].strip() == "pass"
                and lines[i + 3].strip() == "mo.md(response_text)"
            ):
                indent = m.group(1)
                placeholder = m.group(3)
                output.append(f'{indent}mo.md(r"""\n')
                output.append(f"{indent}{placeholder}\n")
                output.append(f'{indent}""")\n')
                i += 4
                continue
        output.append(lines[i])
        i += 1
    return output

build_submit_cell(server_url, assignment_name)

Build a submit cell that uses mograder.remote.submit().

Returns source text for two marimo cells (username input + submit action).

Source code in src/mograder/grading/cells.py
def build_submit_cell(server_url: str, assignment_name: str) -> str:
    """Build a submit cell that uses ``mograder.remote.submit()``.

    Returns source text for two marimo cells (username input + submit action).

    Args:
        server_url: Base URL of the submission server, interpolated into the
            generated source as a string literal.
        assignment_name: Assignment identifier, likewise interpolated.
    """
    # Generated source defines two marimo cells:
    #   1. A hidden-code cell with a username text box and a Submit button;
    #      mo.stop() skips it entirely when MOGRADER_DASHBOARD=1.
    #   2. A cell that runs only after the button is pressed with a
    #      non-empty username, then calls mograder.remote.submit().
    # NOTE(review): server_url/assignment_name are spliced into string
    # literals — values containing a double quote would break the generated
    # code; presumably these come from trusted config. TODO confirm.
    return f'''\

@app.cell(hide_code=True)
def _(mo):
    {SUBMIT_MARKER}
    import os as _os
    mo.stop(_os.environ.get("MOGRADER_DASHBOARD") == "1")
    submit_username = mo.ui.text(label="Username", placeholder="Enter your username")
    submit_btn = mo.ui.run_button(label="Submit")
    mo.hstack([submit_username, submit_btn])
    return (submit_btn, submit_username)


@app.cell(hide_code=True)
def _(submit_btn, submit_username, mo):
    mo.stop(not submit_btn.value or not submit_username.value)
    from mograder.remote import submit as submit_fn
    submit_result = submit_fn("{server_url}", "{assignment_name}", __file__, submit_username.value)
    mo.callout(mo.md(f"**Submitted!** Status: {{submit_result}}"), kind="success")
    return


'''

process_file(source, output_dir, dry_run=False, validate_only=False, submit_url=None)

Process a single notebook file. Returns True on success.

Source code in src/mograder/grading/cells.py
def process_file(
    source: Path,
    output_dir: Path | None,
    dry_run: bool = False,
    validate_only: bool = False,
    submit_url: str | None = None,
) -> bool:
    """Process a single notebook file. Returns True on success.

    Pipeline: validate markers, strip solution and hidden-test blocks,
    convert markdown answer cells, optionally inject a submit cell, then
    write the student-facing copy (with metadata and cell hashes) to
    *output_dir*.

    Args:
        source: Instructor notebook to process.
        output_dir: Destination directory; defaults to ``release/``.
        dry_run: Report what would be stripped without writing anything.
        validate_only: Stop after marker validation.
        submit_url: When given, inject a submit cell pointing at this server.

    Returns:
        True on success (including skips); False when validation fails.
    """
    lines = source.read_text().splitlines(keepends=True)

    errors = validate_markers(lines, str(source))
    if errors:
        for err in errors:
            print(f"ERROR: {err}", file=sys.stderr)
        return False

    n_solutions = count_markers(lines)
    if n_solutions == 0:
        print(f"SKIP: {source} (no solution markers found)")
        return True

    if validate_only:
        print(f"VALID: {source} ({n_solutions} solution blocks)")
        return True

    # The assignment is named after the directory containing the notebook
    # (computed once; previously duplicated below).
    assignment_name = source.parent.name

    n_hidden = count_hidden_markers(lines)
    student_lines = strip_solutions(lines)
    student_lines = strip_hidden_tests(student_lines)
    student_lines = convert_markdown_cells(student_lines)

    if submit_url:
        submit_cell = build_submit_cell(submit_url, assignment_name)
        student_lines = _inject_before_main(student_lines, submit_cell)

    if dry_run:
        n_removed = len(lines) - len(student_lines)
        # ": " separator fixed — path and count were previously concatenated.
        msg = f"DRY-RUN: {_rel(source)}: {n_solutions} solution blocks stripped"
        if n_hidden:
            msg += f", {n_hidden} hidden test blocks stripped"
        msg += f", {n_removed} lines removed"
        print(msg)
        return True

    if output_dir is None:
        output_dir = Path("release")
    output_dir.mkdir(parents=True, exist_ok=True)
    dest = output_dir / source.name

    # Inject assignment metadata into PEP 723 block
    student_lines = _inject_assignment_metadata(student_lines, assignment_name)

    # Inject hidden-tests flag if any hidden test blocks were stripped
    if n_hidden > 0:
        student_lines = _inject_hidden_tests_metadata(student_lines)

    dest.write_text("".join(student_lines))

    # Inject cell hashes (needs parsed marimo IR, so operates on written text)
    text = dest.read_text()
    text = _inject_cell_hashes(text)
    dest.write_text(text)

    # " -> " separator fixed — source and dest paths were previously
    # concatenated with nothing between them.
    msg = f"OK: {_rel(source)} -> {_rel(dest)} ({n_solutions} solution blocks stripped"
    if n_hidden:
        msg += f", {n_hidden} hidden test blocks stripped"
    msg += ")"
    print(msg)
    return True

build_release_zip(release_dir)

Create a zip of student-facing release files, excluding artifacts.

Returns None (and removes any stale zip) when the directory contains only a single file — a zip that wraps one file adds no value.

Uses a fixed timestamp so the zip is reproducible across runs.

Source code in src/mograder/grading/cells.py
def build_release_zip(release_dir: Path) -> Path | None:
    """Create a zip of student-facing release files, excluding artifacts.

    Returns *None* (and removes any stale zip) when the directory contains
    only a single file — a zip that wraps one file adds no value.

    Uses a fixed timestamp so the zip is reproducible across runs.
    """
    zip_path = release_dir / f"{release_dir.name}.zip"
    _EXCLUDE_SUFFIXES = {".html", ".zip"}

    # Collect candidate files
    candidates = sorted(
        f
        for f in release_dir.iterdir()
        if f.is_file()
        and not f.name.startswith(".")
        and f.suffix not in _EXCLUDE_SUFFIXES
    )

    # Skip zip when there's only a single file (e.g. just the .py notebook)
    if len(candidates) <= 1:
        zip_path.unlink(missing_ok=True)
        return None

    # Fixed date_time for reproducible output (2025-01-01 00:00:00)
    _FIXED_TIME = (2025, 1, 1, 0, 0, 0)
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
        for f in candidates:
            info = zipfile.ZipInfo(f.name, date_time=_FIXED_TIME)
            info.compress_type = zipfile.ZIP_DEFLATED
            zf.writestr(info, f.read_bytes())
    return zip_path

extract_marking_scale(source_lines)

Extract the Marking Scale admonition from a source notebook.

Looks for /// details | Marking Scale ... /// block in markdown cells. Returns the markdown content (without the admonition wrapper), or None.

Source code in src/mograder/grading/cells.py
def extract_marking_scale(source_lines: list[str]) -> str | None:
    """Extract the Marking Scale admonition from a source notebook.

    Looks for ``/// details | Marking Scale`` ... ``///`` block in markdown cells.
    Returns the markdown content (without the admonition wrapper), or None.
    """
    import textwrap

    text = "".join(source_lines)
    match = re.search(
        r"///\s*details\s*\|\s*Marking Scale\s*\n(.*?)\n\s*///",
        text,
        re.DOTALL,
    )
    if not match:
        return None
    content = match.group(1)
    lines = content.splitlines()
    # Skip directive lines (e.g. "    type: info") and blank lines at start
    start = 0
    for i, line in enumerate(lines):
        stripped = line.strip()
        if stripped.startswith("type:") or not stripped:
            start = i + 1
        else:
            break
    body = "\n".join(lines[start:])
    return textwrap.dedent(body).strip() or None

parse_marks_metadata(source_lines)

Extract marks metadata from a notebook.

Reads _marks = {...} from the MARKS_MARKER cell. All question marks (both auto-checked and manual) must be listed in this single dict.

Returns None if no MARKS_MARKER cell found.

Source code in src/mograder/grading/cells.py
def parse_marks_metadata(source_lines: list[str]) -> dict[str, int | float] | None:
    """Extract marks metadata from a notebook.

    Reads ``_marks = {...}`` from the MARKS_MARKER cell. All question marks
    (both auto-checked and manual) must be listed in this single dict.

    Returns None if no MARKS_MARKER cell found (or the dict is missing,
    malformed, or empty).
    """
    joined = "".join(source_lines)
    if MARKS_MARKER not in joined:
        return None

    # Only look at the text from the marker onward.
    tail = joined[joined.index(MARKS_MARKER):]

    found = re.search(r"_marks\s*=\s*(\{[^}]+\})", tail)
    if found is None:
        return None

    try:
        parsed = ast.literal_eval(found.group(1))
    except (ValueError, SyntaxError):
        return None

    # Empty dict is treated the same as missing metadata.
    return parsed or None

parse_auto_marks(source_lines)

Extract auto-scored marks from a verification cell with marks data.

Looks for _mograder_marks and _mograder_checks in the verification cell and computes sum of marks for PASS checks. Supports both 4-tuple format (label, status, earned_weight, total_weight) (fractional) and legacy 2-tuple format (label, status) (binary).

Returns None if no marks data found in the verification cell.

Source code in src/mograder/grading/cells.py
def parse_auto_marks(source_lines: list[str]) -> int | float | None:
    """Extract auto-scored marks from a verification cell with marks data.

    Looks for ``_mograder_marks`` and ``_mograder_checks`` in the verification
    cell and sums the marks earned by passing checks.  Supports both the
    4-tuple format ``(label, status, earned_weight, total_weight)``
    (fractional credit) and the legacy 2-tuple format ``(label, status)``
    (binary credit).

    Returns None if no marks data found in the verification cell.
    """
    joined = "".join(source_lines)
    if VERIFICATION_MARKER not in joined or "_mograder_marks" not in joined:
        return None

    tail = joined[joined.index(VERIFICATION_MARKER):]

    # _mograder_marks maps check keys to available marks.
    marks_m = re.search(r"_mograder_marks\s*=\s*(\{[^}]+\})", tail)
    if marks_m is None:
        return None
    try:
        available = ast.literal_eval(marks_m.group(1))
    except (ValueError, SyntaxError):
        return None

    checks_m = re.search(r"_mograder_checks\s*=\s*\[(.*?)\]", tail, re.DOTALL)
    if checks_m is None:
        return None
    checks_src = checks_m.group(1)

    # Preferred: 4-tuple entries ("label", "status", earned, total).
    quads = list(
        re.finditer(
            r'\("([^"]+)",\s*"([^"]+)",\s*([0-9.]+),\s*([0-9.]+)\)',
            checks_src,
        )
    )
    if quads:
        total = 0.0
        for m in quads:
            key = m.group(1).split(":")[0].strip()
            if key not in available:
                continue
            earned_w, total_w = float(m.group(3)), float(m.group(4))
            if total_w > 0:
                total += round(available[key] * earned_w / total_w, 1)
            elif m.group(2) == "PASS":
                # Backward compat: total_w=0 means old binary data
                total += available[key]
        return total

    # Legacy: binary 2-tuple entries ("label", "status").
    total_int = 0
    for m in re.finditer(r'\("([^"]+)",\s*"([^"]+)"\)', checks_src):
        key = m.group(1).split(":")[0].strip()
        if m.group(2) == "PASS" and key in available:
            total_int += available[key]
    return total_int

has_grading_cells(source_lines)

Detect if grading cells are already injected.

Source code in src/mograder/grading/cells.py
def has_grading_cells(source_lines: list[str]) -> bool:
    """Detect if grading cells are already injected."""
    joined = "".join(source_lines)
    return any(marker in joined for marker in (VERIFICATION_MARKER, FEEDBACK_MARKER))

inject_grading_cells(source_lines, checks, cell_errors=0, marks=None, source_check_keys=None)

Insert verification summary + marker feedback cells before if __name__.

Returns modified source lines. Idempotent: if grading cells already exist, returns the input unchanged.

When marks is provided, the verification cell includes a marks column and the feedback cell is pre-configured for manual-only grading.

source_check_keys, if given, determines which marks-dict keys are auto-graded (have a check() call in the source notebook). When omitted, keys are inferred from the student's executed checks — which may be incomplete if mo.stop guards prevented some checks from running.

Source code in src/mograder/grading/cells.py
def inject_grading_cells(
    source_lines: list[str],
    checks: list[CheckResult],
    cell_errors: int = 0,
    marks: dict[str, int | float] | None = None,
    source_check_keys: set[str] | None = None,
) -> list[str]:
    """Insert verification summary + marker feedback cells before ``if __name__``.

    Returns modified source lines. Idempotent: if grading cells already exist,
    returns the input unchanged.

    When ``marks`` is provided, the verification cell includes a marks column
    and the feedback cell is pre-configured for manual-only grading.

    ``source_check_keys``, if given, determines which marks-dict keys are
    auto-graded (have a ``check()`` call in the source notebook).  When omitted,
    keys are inferred from the student's executed checks — which may be
    incomplete if ``mo.stop`` guards prevented some checks from running.
    """
    if has_grading_cells(source_lines):
        return source_lines

    if marks is None:
        verification = _build_verification_cell(checks, cell_errors)
        feedback = _build_feedback_cell()
    else:
        # Prefer source_check_keys (from the source notebook run) over the
        # student's executed checks, which may be incomplete due to mo.stop
        # guards.
        auto_keys = source_check_keys or {
            c.label.split(":")[0].strip() for c in checks
        }

        # Fractional auto-mark: scale each check's available marks by the
        # weight it earned; fall back to all-or-nothing for legacy checks.
        earned = 0.0
        for c in checks:
            key = c.label.split(":")[0].strip()
            if key not in marks:
                continue
            avail = marks[key]
            if c.total_weight > 0:
                earned += round(avail * c.earned_weight / c.total_weight, 1)
            elif c.status == "success":
                earned += avail

        manual_available = sum(v for k, v in marks.items() if k not in auto_keys)
        verification = _build_verification_cell(checks, cell_errors, marks)
        feedback = _build_feedback_cell(earned, manual_available, sum(marks.values()))

    # Splice the new cells in just above the `if __name__` guard, or at the
    # end of the file when no guard exists.
    insert_idx = next(
        (
            i
            for i, line in enumerate(source_lines)
            if line.strip().startswith("if __name__")
        ),
        len(source_lines),
    )

    return (
        source_lines[:insert_idx]
        + (verification + feedback).splitlines(keepends=True)
        + source_lines[insert_idx:]
    )

parse_marker_feedback(source_lines)

Extract _mark and _feedback from a graded notebook.

Looks for the MOGRADER: MARKER FEEDBACK marker and parses the variable assignments that follow it.

Returns (mark, feedback) where mark is None if not yet graded.

Source code in src/mograder/grading/cells.py
def parse_marker_feedback(source_lines: list[str]) -> tuple[int | None, str]:
    """Extract ``_mark`` and ``_feedback`` from a graded notebook.

    Looks for the MOGRADER: MARKER FEEDBACK marker and parses the variable
    assignments that follow it.

    Returns (mark, feedback) where mark is None if not yet graded.
    """
    joined = "".join(source_lines)
    if FEEDBACK_MARKER not in joined:
        return (None, "")

    # Only parse the text from the feedback marker onward.
    tail = joined[joined.index(FEEDBACK_MARKER):]

    # _mark is either an integer literal or the word None.
    mark: int | None = None
    mark_m = re.search(r"_mark\s*=\s*(\d+|None)", tail)
    if mark_m is not None and mark_m.group(1) != "None":
        mark = int(mark_m.group(1))

    # _feedback: triple-quoted forms take priority over single-line forms.
    quote_patterns = (
        (r'_feedback\s*=\s*"""(.*?)"""', re.DOTALL),
        (r"_feedback\s*=\s*'''(.*?)'''", re.DOTALL),
        (r'_feedback\s*=\s*"((?:[^"\\]|\\.)*)"', 0),
        (r"_feedback\s*=\s*'((?:[^'\\]|\\.)*)'", 0),
    )
    feedback = ""
    for pattern, flags in quote_patterns:
        fb_m = re.search(pattern, tail, flags)
        if fb_m is not None:
            feedback = fb_m.group(1)
            break

    return (mark, feedback)

write_marker_feedback(file_path, mark, feedback)

Write _mark and _feedback values into a graded notebook.

The file must contain a MOGRADER: MARKER FEEDBACK marker cell. Always writes _feedback as a triple-quoted string.

Raises ValueError if the feedback marker is not found.

Source code in src/mograder/grading/cells.py
def write_marker_feedback(file_path: Path, mark: int | None, feedback: str) -> None:
    """Write ``_mark`` and ``_feedback`` values into a graded notebook.

    The file must contain a MOGRADER: MARKER FEEDBACK marker cell.
    Always writes ``_feedback`` as a triple-quoted string.

    Both replacements are scoped to the text after the feedback marker
    (matching how ``parse_marker_feedback`` reads them back) and replace
    only the first occurrence — previously ``_mark`` was replaced
    everywhere in the file.

    Raises ValueError if the feedback marker is not found.
    """
    text = file_path.read_text()
    if FEEDBACK_MARKER not in text:
        raise ValueError(f"No {FEEDBACK_MARKER} found in {file_path}")

    # Work only on the marker's tail so assignments elsewhere in the
    # notebook are never touched.
    marker_idx = text.index(FEEDBACK_MARKER)
    head, tail = text[:marker_idx], text[marker_idx:]

    # Replace the first _mark value only (count=1, consistent with the
    # single replacement done for _feedback below).
    mark_str = str(mark) if mark is not None else "None"
    tail = re.sub(r"_mark\s*=\s*(\d+|None)", f"_mark = {mark_str}", tail, count=1)

    # Escape triple quotes so the written literal stays syntactically valid.
    safe_feedback = feedback.replace('"""', r"\"\"\"")
    replacement = f'_feedback = """{safe_feedback}"""'

    def _literal(_m: re.Match) -> str:
        # Callable replacement: a plain string would have backslashes in
        # the feedback interpreted as regex escape sequences (e.g. "\1",
        # "\g<...>"), corrupting the output or raising re.error.
        return replacement

    # Try triple-quoted patterns first, then single-line quoted forms.
    new_tail = tail
    for pattern, flags in (
        (r'_feedback\s*=\s*""".*?"""', re.DOTALL),
        (r"_feedback\s*=\s*'''.*?'''", re.DOTALL),
        (r'_feedback\s*=\s*"(?:[^"\\]|\\.)*"', 0),
        (r"_feedback\s*=\s*'(?:[^'\\]|\\.)*'", 0),
    ):
        new_tail, count = re.subn(pattern, _literal, tail, count=1, flags=flags)
        if count:
            break

    file_path.write_text(head + new_tail)