diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..21e3a86 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,20 @@ +# Agent instructions for github_pm workspaces + +## Required checks before finishing any task + +- **`tox` must complete successfully** for every change that touches the Python backend (and should be run once you believe backend work is done). Run it from the **`backend`** directory: + + ```bash + cd backend && tox + ``` + + This runs the environments defined in `backend/pyproject.toml` (format, import order, lint, tests, coverage). Do **not** consider backend work complete while **`tox`** reports failures. + +- **Fix all lint failures and unit test failures** reported by those checks (and any other checks you ran) **before** stopping. A green **`tox`** run is the acceptance bar for backend changes. + +- For **frontend** (`frontend/`) changes, run **`npm test`** (and **`npm run format:check`** if you edited formatted sources) from `frontend/` and fix failures there as well when the task involves the UI or client code. + +## Notes + +- Use **`uv`** in the backend as described in the project `README.md` (e.g. `uv sync`, `uv run`). +- Prefer small, focused diffs; match existing style and patterns in both backend and frontend. diff --git a/backend/.env_sample b/backend/.env_sample index 518623c..ce49549 100644 --- a/backend/.env_sample +++ b/backend/.env_sample @@ -1,3 +1,7 @@ github_token= github_repo=/ app_name= +sdlc_feature_labels=enhancement +sdlc_bug_labels=bug +sdlc_docs_labels=documentation +sdlc_escape_label=escape diff --git a/backend/src/github_pm/api.py b/backend/src/github_pm/api.py index 625faf5..379df46 100644 --- a/backend/src/github_pm/api.py +++ b/backend/src/github_pm/api.py @@ -19,15 +19,17 @@ class Connector: - def __init__(self, github_token: str): + def __init__(self, github_token: str, *, github_repo: str | None = None): """Initialize a GitHub connection. Args: github_token: The GitHub Personal Access Token to use + github_repo: ``owner/name``; defaults to ``context.github_repo`` when omitted. 
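+
+        A minimal usage sketch (token and repo values are hypothetical):
+
+            Connector("ghp_example", github_repo="octocat/hello-world")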
""" self.github_token = github_token self.base_url = "https://api.github.com" - self.owner, self.repo = context.github_repo.split("/", maxsplit=1) + repo = github_repo if github_repo is not None else context.github_repo + self.owner, self.repo = repo.split("/", maxsplit=1) self.github = requests.session() self.github.headers.update( { @@ -40,7 +42,7 @@ def __init__(self, github_token: str): logger.info( "Initializing GitHub Connector service to %s/%s", self.base_url, - context.github_repo, + repo, ) def get(self, path: str, headers: dict[str, str] | None = None) -> dict: diff --git a/backend/src/github_pm/app.py b/backend/src/github_pm/app.py index 7d68fa3..e9c9c19 100644 --- a/backend/src/github_pm/app.py +++ b/backend/src/github_pm/app.py @@ -1,6 +1,7 @@ from fastapi import APIRouter, FastAPI from github_pm.api import api_router +from github_pm.sdlc_api import sdlc_router router = APIRouter() @@ -11,6 +12,7 @@ async def health(): router.include_router(api_router, prefix="/api/v1") +router.include_router(sdlc_router, prefix="/api/v1") app = FastAPI( title="GitHub Project Management API", diff --git a/backend/src/github_pm/context.py b/backend/src/github_pm/context.py index 8a0992b..0c76176 100644 --- a/backend/src/github_pm/context.py +++ b/backend/src/github_pm/context.py @@ -4,6 +4,15 @@ from pydantic_settings import BaseSettings, SettingsConfigDict +def _parse_sdlc_label_csv(value: object) -> frozenset[str]: + """Parse comma-separated label names into a lowercase set.""" + if isinstance(value, frozenset): + return value + if not isinstance(value, str): + return frozenset() + return frozenset(part.strip().lower() for part in value.split(",") if part.strip()) + + class Settings(BaseSettings): model_config = SettingsConfigDict( extra="ignore", @@ -14,6 +23,14 @@ class Settings(BaseSettings): app_name: Annotated[str, Field(default="GitHub Project Manager")] github_repo: Annotated[str, Field(default="vllm-project/guidellm")] github_token: Annotated[str, Field(default="")] + # SDLC KPIs: classify PRs (comma-separated; matched case-insensitively on label name). + # Stored as str so empty .env values do not break settings parsing. Use + # sdlc_metrics._parse_sdlc_label_csv for set semantics. + # Precedence when multiple match: bug fix > docs > feature (see sdlc_metrics.classify_pr_type). 
+ sdlc_feature_labels: Annotated[str, Field(default="enhancement,feature")] + sdlc_bug_labels: Annotated[str, Field(default="bug")] + sdlc_docs_labels: Annotated[str, Field(default="documentation")] + sdlc_escape_label: Annotated[str, Field(default="escape")] context = Settings() diff --git a/backend/src/github_pm/sdlc_api.py b/backend/src/github_pm/sdlc_api.py new file mode 100644 index 0000000..297fef7 --- /dev/null +++ b/backend/src/github_pm/sdlc_api.py @@ -0,0 +1,65 @@ +"""SDLC KPI REST endpoints (GitHub-backed).""" + +from __future__ import annotations + +from typing import Annotated + +from fastapi import APIRouter, Depends, Query + +from github_pm import sdlc_service +from github_pm.api import connection, Connector +from github_pm.context import context +from github_pm.sdlc_models import ( + BugBacklogSeriesResponse, + DeliverySeriesResponse, + EscapedDefectSeriesResponse, +) + +sdlc_router = APIRouter(prefix="/sdlc", tags=["sdlc"]) + + +@sdlc_router.get("/delivery", response_model=DeliverySeriesResponse) +async def get_sdlc_delivery( + gitctx: Annotated[Connector, Depends(connection)], + weeks: Annotated[int, Query(ge=1, le=52)] = 4, + week_days: Annotated[int, Query(ge=1, le=90)] = 7, +): + """ + Delivery metrics: merged PR throughput, median cycle time, median time to first human review, + repeated for each of the last ``weeks`` windows of ``week_days`` days (oldest slice first). + + Each slice window is ``(slice_end - week_days, slice_end]`` in UTC. PRs authored by bots + (Dependabot, Mergify, etc.) are excluded from all delivery stats. Reviews exclude GitHub bots. + """ + return sdlc_service.compute_sdlc_delivery_series( + gitctx, context, weeks=weeks, week_days=week_days + ) + + +@sdlc_router.get("/escaped-defect-rate", response_model=EscapedDefectSeriesResponse) +async def get_escaped_defect_rate( + gitctx: Annotated[Connector, Depends(connection)], + weeks: Annotated[int, Query(ge=1, le=52)] = 4, + week_days: Annotated[int, Query(ge=1, le=90)] = 7, +): + """ + Escaped defect metrics per week (oldest slice first). Milestone rows match the cumulative + endpoint (next open line plus two previous minors), but counts are **incremental** within + each ``week_days`` window: PRs merged into the milestone and escape issues **created** in + that window (same prior-milestone attribution). Bot-authored PRs are excluded from denominators. 
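+
+    Worked example with hypothetical counts: 2 feature + 1 bug-fix + 1 docs PRs
+    merged in a slice, with 1 escape issue created in it, yields
+    ``rate = 1 / (2 + 1 + 1) = 0.25`` for that row.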
+ """ + return sdlc_service.compute_escaped_defect_rate_series( + gitctx, context, weeks=weeks, week_days=week_days + ) + + +@sdlc_router.get("/bug-backlog-delta", response_model=BugBacklogSeriesResponse) +async def get_bug_backlog_delta( + gitctx: Annotated[Connector, Depends(connection)], + weeks: Annotated[int, Query(ge=1, le=52)] = 4, + week_days: Annotated[int, Query(ge=1, le=90)] = 7, +): + """Bug issues opened, closed, and net per week (``weeks`` slices of ``week_days``, oldest first).""" + return sdlc_service.compute_bug_backlog_delta_series( + gitctx, context, weeks=weeks, week_days=week_days + ) diff --git a/backend/src/github_pm/sdlc_metrics.py b/backend/src/github_pm/sdlc_metrics.py new file mode 100644 index 0000000..1df1571 --- /dev/null +++ b/backend/src/github_pm/sdlc_metrics.py @@ -0,0 +1,578 @@ +"""SDLC KPI helpers: PR classification, size buckets, medians, GitHub search.""" + +from __future__ import annotations + +from collections.abc import Callable, Iterable, Mapping, Sequence +from datetime import datetime, timedelta, UTC +import re +from typing import Any, Literal +from urllib.parse import quote_plus + +from github_pm.context import _parse_sdlc_label_csv, Settings +from github_pm.logger import logger + +PRType = Literal["feature", "bug_fix", "docs", "unclassified"] +SizeBucket = Literal["tiny", "small", "medium", "large", "unknown"] + + +def utc_now() -> datetime: + return datetime.now(tz=UTC) + + +def window_start(days: int, now: datetime | None = None) -> datetime: + """Start of rolling window: `now - days`, UTC.""" + if now is None: + now = utc_now() + return now - timedelta(days=days) + + +def date_str(d: datetime) -> str: + """GitHub search date (YYYY-MM-DD) in UTC.""" + if d.tzinfo is None: + d = d.replace(tzinfo=UTC) + return d.astimezone(UTC).date().isoformat() + + +def classify_pr_type( + label_names: Iterable[str], + settings: Settings, +) -> PRType: + """Precedence: bug fix > docs > feature > unclassified.""" + lower = {n.lower() for n in label_names} + if lower & _parse_sdlc_label_csv(settings.sdlc_bug_labels): + return "bug_fix" + if lower & _parse_sdlc_label_csv(settings.sdlc_docs_labels): + return "docs" + if lower & _parse_sdlc_label_csv(settings.sdlc_feature_labels): + return "feature" + return "unclassified" + + +def size_bucket_from_lines(changed_lines: int | None) -> SizeBucket: + if changed_lines is None or changed_lines < 0: + return "unknown" + if changed_lines <= 10: + return "tiny" + if changed_lines <= 100: + return "small" + if changed_lines <= 500: + return "medium" + return "large" + + +def median_seconds(values: list[float]) -> float | None: + if not values: + return None + s = sorted(values) + n = len(s) + mid = n // 2 + if n % 2: + return s[mid] + return (s[mid - 1] + s[mid]) / 2.0 + + +def median_seconds_by_group( + items: list[tuple[PRType | str, SizeBucket, float]], + group_key: Callable[[tuple[PRType | str, SizeBucket, float]], str], +) -> dict[str, float | None]: + buckets: dict[str, list[float]] = {} + for row in items: + key = group_key(row) + buckets.setdefault(key, []).append(row[2]) + return {k: median_seconds(v) for k, v in buckets.items()} + + +def is_bot_user(login: str | None, user_type: str | None) -> bool: + if user_type == "Bot": + return True + if not login: + return False + if login.endswith("[bot]"): + return True + return login.endswith("-bot") + + +def is_pr_author_bot(author: dict[str, Any] | None) -> bool: + """True if the PR author is a bot (Dependabot, Mergify, GitHub Actions, etc.).""" + if not author: + 
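+        # No author payload (e.g. deleted/ghost users); assume a human rather than a bot.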
return False + login = (author.get("login") or "").strip() + typename = author.get("__typename") or "" + rest_type = author.get("type") or "" + if typename == "Bot" or rest_type == "Bot": + return True + if not login: + return False + gh_type = rest_type or ("Bot" if typename == "Bot" else "User") + if is_bot_user(login, gh_type): + return True + low = login.lower() + for prefix in ( + "dependabot", + "mergify", + "renovate", + "greenkeeper", + "snyk-", + "pyup-", + "imgbot", + "codecov", + ): + if low.startswith(prefix): + return True + return False + + +def filter_out_bot_pr_nodes(nodes: list[dict[str, Any]]) -> list[dict[str, Any]]: + """Drop PRs authored by bots from GraphQL search results.""" + out: list[dict[str, Any]] = [] + for n in nodes: + if not n or n.get("number") is None: + continue + if is_pr_author_bot(n.get("author")): + continue + out.append(n) + return out + + +def first_human_review_submitted_at( + reviews: Sequence[Mapping[str, Any]], +) -> datetime | None: + """First review by submission time among non-bot authors.""" + candidates: list[datetime] = [] + for r in reviews: + user = r.get("user") or {} + login = user.get("login") + utype = user.get("type") + if is_bot_user(login, utype): + continue + raw = r.get("submitted_at") + if not raw: + continue + try: + # GitHub returns Z suffix + candidates.append(datetime.fromisoformat(raw.replace("Z", "+00:00"))) + except ValueError: + continue + if not candidates: + return None + return min(candidates) + + +def parse_github_ts(raw: str | None) -> datetime | None: + if not raw: + return None + try: + return datetime.fromisoformat(raw.replace("Z", "+00:00")) + except ValueError: + return None + + +def semver_tuple(title: str) -> tuple[int, int, int]: + """Parse `vX.Y.Z` title into a tuple for ordering. Caller must validate format.""" + parts = title[1:].split(".") + return (int(parts[0]), int(parts[1]), int(parts[2])) + + +def _best_closed_on_line( + closed_milestones: list[dict[str, Any]], + line: tuple[int, int], + version_match: re.Pattern[str], +) -> str | None: + """Latest closed milestone on the given (major, minor) line (highest patch).""" + best: tuple[str, tuple[int, int, int]] | None = None + for m in closed_milestones: + title = m.get("title") or "" + if not version_match.match(title): + continue + t = semver_tuple(title) + if (t[0], t[1]) != line: + continue + if best is None or t > best[1]: + best = (title, t) + return best[0] if best else None + + +def _lowest_open_on_line( + open_milestones: list[dict[str, Any]], + line: tuple[int, int], + version_match: re.Pattern[str], +) -> str | None: + """Lowest open semver on the given (major, minor) line.""" + titles: list[str] = [] + for m in open_milestones: + title = m.get("title") or "" + if not version_match.match(title): + continue + t = semver_tuple(title) + if (t[0], t[1]) == line: + titles.append(title) + if not titles: + return None + return min(titles, key=semver_tuple) + + +def select_escaped_defect_milestones( + open_milestones: list[dict[str, Any]], + closed_milestones: list[dict[str, Any]], + *, + version_match: re.Pattern[str], +) -> list[tuple[str, bool]]: + """Milestones for escaped defect rate: next open line + two previous minor lines. + + The **first open** milestone is the globally lowest open ``vX.Y.Z``. Included rows: + + * The two **previous** release lines ``(M, m-2)`` and ``(M, m-1)`` when they exist + (e.g. ``v0.5.x`` and ``v0.6.x`` when the next line is ``v0.7.x``), using the + **latest closed** milestone per line (highest patch). 
+ * The **current** line ``(M, m)``: the lowest open milestone on that line + (pre-release); ``is_next_open`` is True for this row only. + + Rows are returned sorted by **semantic version** (ascending). Lines with no matching + closed milestone are omitted (except the open line, which requires an open milestone). + """ + open_semver_titles = [ + m["title"] for m in open_milestones if version_match.match(m.get("title") or "") + ] + if not open_semver_titles: + return [] + + next_open = min(open_semver_titles, key=semver_tuple) + major_v, minor_v, _patch_v = semver_tuple(next_open) + + lines_in_order: list[tuple[int, int]] = [] + if minor_v >= 2: + lines_in_order.append((major_v, minor_v - 2)) + if minor_v >= 1: + lines_in_order.append((major_v, minor_v - 1)) + lines_in_order.append((major_v, minor_v)) + + out: list[tuple[str, bool]] = [] + for line in lines_in_order: + if line == (major_v, minor_v): + title = _lowest_open_on_line(open_milestones, line, version_match) + if title is None: + continue + out.append((title, True)) + else: + title = _best_closed_on_line(closed_milestones, line, version_match) + if title is not None: + out.append((title, False)) + + out.sort(key=lambda row: semver_tuple(row[0])) + return out + + +def build_semver_milestone_previous_map( + open_milestones: list[dict[str, Any]], + closed_milestones: list[dict[str, Any]], + *, + version_match: re.Pattern[str], +) -> dict[str, str]: + """Map each semver milestone title to the immediately prior semver milestone. + + Ordering is global semver order across open and closed milestones. The + earliest milestone has no predecessor and is omitted from the map. + """ + titles: set[str] = set() + for m in open_milestones + closed_milestones: + t = m.get("title") or "" + if version_match.match(t): + titles.add(t) + ordered = sorted(titles, key=semver_tuple) + return {ordered[i]: ordered[i - 1] for i in range(1, len(ordered))} + + +def escape_labeled_issues_query(github_repo: str, escape_label: str) -> str: + """GitHub issue search query for issues carrying the escape label.""" + lab = escape_label.strip().lower() + if not lab: + lab = "escape" + label_tok = f'label:"{lab}"' if any(c in lab for c in " /") else f"label:{lab}" + return f"{repo_search_fragment(github_repo)} is:issue {label_tok}" + + +def rest_search_issue_items_paginated( + get_fn: Callable[[str], dict[str, Any]], + q: str, + *, + per_page: int = 100, +) -> list[dict[str, Any]]: + """Walk ``/search/issues`` pages (GitHub caps total results at 1000).""" + items: list[dict[str, Any]] = [] + page = 1 + while True: + path = f"/search/issues?q={quote_plus(q)}&per_page={per_page}&page={page}" + data = get_fn(path) + batch = data.get("items") or [] + items.extend(batch) + if len(batch) < per_page: + break + if len(items) >= 1000: + break + page += 1 + return items + + +def count_escape_issues_by_prior_milestone( + escape_items: Iterable[dict[str, Any]], + issue_milestone_to_prior: dict[str, str], + *, + version_match: re.Pattern[str], +) -> dict[str, int]: + """Attribute each escape issue to the milestone *before* its own milestone.""" + counts: dict[str, int] = {} + for item in escape_items: + ms = item.get("milestone") + if not isinstance(ms, dict): + continue + mt = ms.get("title") or "" + if not version_match.match(str(mt)): + continue + prior = issue_milestone_to_prior.get(str(mt)) + if prior is None: + continue + counts[prior] = counts.get(prior, 0) + 1 + return counts + + +def graphql_search_pull_requests( + post_graphql: Callable[[dict[str, Any]], dict[str, Any]], + 
search_query: str,
+    *,
+    page_size: int = 100,
+) -> list[dict[str, Any]]:
+    """Paginate GitHub GraphQL search (PullRequest nodes)."""
+    nodes: list[dict[str, Any]] = []
+    cursor: str | None = None
+    gql = """
+    query($q: String!, $first: Int!, $after: String) {
+      search(query: $q, type: ISSUE, first: $first, after: $after) {
+        pageInfo { hasNextPage endCursor }
+        nodes {
+          ... on PullRequest {
+            number
+            createdAt
+            mergedAt
+            additions
+            deletions
+            labels(first: 30) { nodes { name } }
+            milestone { title }
+            author {
+              __typename
+              ... on User { login }
+              ... on Bot { login }
+              ... on Organization { login }
+            }
+          }
+        }
+      }
+    }
+    """
+    while True:
+        payload = {
+            "query": gql,
+            "variables": {
+                "q": search_query,
+                "first": page_size,
+                "after": cursor,
+            },
+        }
+        data = post_graphql(payload)
+        errors = data.get("errors")
+        if errors:
+            logger.error("GraphQL errors: %s", errors)
+            raise RuntimeError(f"GitHub GraphQL error: {errors!r}")
+        search = data.get("data", {}).get("search") or {}
+        batch = search.get("nodes") or []
+        nodes.extend(filter_out_bot_pr_nodes(batch))
+        page = search.get("pageInfo") or {}
+        if not page.get("hasNextPage"):
+            break
+        cursor = page.get("endCursor")
+        if not cursor:
+            break
+    return nodes
+
+
+def repo_search_fragment(github_repo: str) -> str:
+    return f"repo:{github_repo}"
+
+
+def merged_prs_query(github_repo: str, merged_since: datetime) -> str:
+    return (
+        f"{repo_search_fragment(github_repo)} is:pr is:merged "
+        f"merged:>={date_str(merged_since)}"
+    )
+
+
+def opened_prs_query(github_repo: str, created_since: datetime) -> str:
+    return (
+        f"{repo_search_fragment(github_repo)} is:pr "
+        f"created:>={date_str(created_since)}"
+    )
+
+
+def milestone_merged_prs_query(github_repo: str, milestone_title: str) -> str:
+    # Escape embedded double quotes so the quoted milestone:"..." term stays balanced.
+    safe = milestone_title.replace('"', '\\"')
+    return f'{repo_search_fragment(github_repo)} is:pr is:merged milestone:"{safe}"'
+
+
+def bug_issues_created_query(
+    github_repo: str, bug_labels_csv: str, since: datetime
+) -> str:
+    """Issues with bug label(s) created on or after `since` (any state)."""
+    label_clause = _label_or_clause(_parse_sdlc_label_csv(bug_labels_csv))
+    return (
+        f"{repo_search_fragment(github_repo)} is:issue {label_clause} "
+        f"created:>={date_str(since)}"
+    )
+
+
+def bug_issues_closed_query(
+    github_repo: str, bug_labels_csv: str, since: datetime
+) -> str:
+    label_clause = _label_or_clause(_parse_sdlc_label_csv(bug_labels_csv))
+    return (
+        f"{repo_search_fragment(github_repo)} is:issue is:closed {label_clause} "
+        f"closed:>={date_str(since)}"
+    )
+
+
+def bug_issues_created_query_between(
+    github_repo: str, bug_labels_csv: str, lo: datetime, hi: datetime
+) -> str:
+    """Bug issues created with ``created`` in ``[date(lo), date(hi)]`` (UTC dates, inclusive)."""
+    label_clause = _label_or_clause(_parse_sdlc_label_csv(bug_labels_csv))
+    a, b = date_str(lo), date_str(hi)
+    if a > b:
+        a, b = b, a
+    return (
+        f"{repo_search_fragment(github_repo)} is:issue {label_clause} created:{a}..{b}"
+    )
+
+
+def bug_issues_closed_query_between(
+    github_repo: str, bug_labels_csv: str, lo: datetime, hi: datetime
+) -> str:
+    """Closed bug issues with ``closed`` in ``[date(lo), date(hi)]`` (UTC dates, inclusive)."""
+    label_clause = _label_or_clause(_parse_sdlc_label_csv(bug_labels_csv))
+    a, b = date_str(lo), date_str(hi)
+    if a > b:
+        a, b = b, a
+    return (
+        f"{repo_search_fragment(github_repo)} is:issue is:closed {label_clause} "
+        f"closed:{a}..{b}"
+    )
+
+
+def 
_label_or_clause(labels: frozenset[str]) -> str: + if not labels: + return "" + parts = [ + f'label:"{lab}"' if any(c in lab for c in " /") else f"label:{lab}" + for lab in sorted(labels) + ] + if len(parts) == 1: + return parts[0] + return "(" + " OR ".join(parts) + ")" + + +def rest_search_total_count( + get_fn: Callable[[str], dict[str, Any]], + q: str, +) -> int: + """GET /search/issues total_count.""" + path = f"/search/issues?q={quote_plus(q)}" + data = get_fn(path) + return int(data.get("total_count", 0)) + + +def pr_row_from_graphql_node( + node: dict[str, Any], + settings: Settings, +) -> dict[str, Any]: + labels = [ln["name"] for ln in (node.get("labels") or {}).get("nodes") or []] + pr_type = classify_pr_type(labels, settings) + add = node.get("additions") + de = node.get("deletions") + try: + changed = (add or 0) + (de or 0) + except TypeError: + changed = 0 + bucket = size_bucket_from_lines(changed) + created = parse_github_ts(node.get("createdAt")) + merged = parse_github_ts(node.get("mergedAt")) + return { + "number": node["number"], + "pr_type": pr_type, + "size_bucket": bucket, + "changed_lines": changed, + "created_at": created, + "merged_at": merged, + "milestone_title": (node.get("milestone") or {}).get("title"), + } + + +def aggregate_throughput(rows: Sequence[Mapping[str, Any]]) -> dict[str, Any]: + by_type: dict[str, int] = { + "feature": 0, + "bug_fix": 0, + "docs": 0, + "unclassified": 0, + } + by_size: dict[str, int] = { + "tiny": 0, + "small": 0, + "medium": 0, + "large": 0, + "unknown": 0, + } + for r in rows: + pt = str(r["pr_type"]) + by_type[pt] = by_type.get(pt, 0) + 1 + sb = str(r["size_bucket"]) + by_size[sb] = by_size.get(sb, 0) + 1 + return {"total": len(rows), "by_pr_type": by_type, "by_pr_size": by_size} + + +def build_median_cycle_payload( + rows: Sequence[Mapping[str, Any]], +) -> dict[str, Any]: + triples: list[tuple[PRType | str, SizeBucket, float]] = [] + for r in rows: + c, m = r.get("created_at"), r.get("merged_at") + if not c or not m: + continue + dt = max(0.0, (m - c).total_seconds()) + triples.append((str(r["pr_type"]), str(r["size_bucket"]), dt)) # type: ignore[arg-type] + + overall = median_seconds([t[2] for t in triples]) + by_type = median_seconds_by_group(triples, lambda t: str(t[0])) + by_size = median_seconds_by_group(triples, lambda t: str(t[1])) + return { + "median_seconds": overall, + "by_pr_type": by_type, + "by_pr_size": by_size, + "pr_count": len(triples), + } + + +def build_first_review_payload( + rows_with_review: Sequence[tuple[Mapping[str, Any], float]], + eligible_count: int, +) -> dict[str, Any]: + triples: list[tuple[str, str, float]] = [] + for r, secs in rows_with_review: + triples.append((str(r["pr_type"]), str(r["size_bucket"]), secs)) + + overall = median_seconds([t[2] for t in triples]) + by_type = median_seconds_by_group(triples, lambda t: t[0]) + by_size = median_seconds_by_group(triples, lambda t: t[1]) + return { + "median_seconds": overall, + "by_pr_type": by_type, + "by_pr_size": by_size, + "included_pr_count": len(triples), + "eligible_pr_count": eligible_count, + } diff --git a/backend/src/github_pm/sdlc_models.py b/backend/src/github_pm/sdlc_models.py new file mode 100644 index 0000000..f986c55 --- /dev/null +++ b/backend/src/github_pm/sdlc_models.py @@ -0,0 +1,98 @@ +"""Pydantic models for SDLC KPI API responses.""" + +from __future__ import annotations + +from datetime import datetime + +from pydantic import BaseModel, Field + + +class ThroughputBreakdown(BaseModel): + total: int + by_pr_type: dict[str, 
int] + by_pr_size: dict[str, int] + + +class CycleTimePayload(BaseModel): + median_seconds: float | None + by_pr_type: dict[str, float | None] + by_pr_size: dict[str, float | None] + pr_count: int + + +class FirstReviewPayload(BaseModel): + median_seconds: float | None + by_pr_type: dict[str, float | None] + by_pr_size: dict[str, float | None] + included_pr_count: int + eligible_pr_count: int + + +class DeliveryResponse(BaseModel): + window_days: int + window_start: datetime + window_end: datetime + as_of: datetime + merged_pr_throughput: ThroughputBreakdown + median_pr_cycle_time: CycleTimePayload + median_time_to_first_review: FirstReviewPayload + + +class DeliverySeriesResponse(BaseModel): + weeks: int + week_days: int + slices: list[DeliveryResponse] + + +class EscapedDefectRow(BaseModel): + release: str + feature_prs: int + bug_fix_prs: int + docs_prs: int = Field( + default=0, + description="Merged documentation PRs in this milestone (denominator).", + ) + escape_issues: int = Field( + default=0, + description="Issues with the escape label, attributed to this milestone " + "(milestone on the issue is the *next* semver in repo order).", + ) + rate: float | None = Field( + default=None, + description="escape_issues / (feature + bug_fix + docs); null if denominator is 0", + ) + is_next_open: bool = Field( + default=False, + description="True for the lowest open semver milestone (pre-release / next target).", + ) + + +class EscapedDefectResponse(BaseModel): + """Per-slice escaped defect stats (incremental in ``(window_start, window_end]``).""" + + window_start: datetime | None = None + window_end: datetime | None = None + as_of: datetime + releases: list[EscapedDefectRow] + + +class EscapedDefectSeriesResponse(BaseModel): + weeks: int + week_days: int + slices: list[EscapedDefectResponse] + + +class BugBacklogResponse(BaseModel): + window_days: int + window_start: datetime + window_end: datetime + as_of: datetime + bugs_opened: int + bugs_closed: int + net: int + + +class BugBacklogSeriesResponse(BaseModel): + weeks: int + week_days: int + slices: list[BugBacklogResponse] diff --git a/backend/src/github_pm/sdlc_service.py b/backend/src/github_pm/sdlc_service.py new file mode 100644 index 0000000..5b11653 --- /dev/null +++ b/backend/src/github_pm/sdlc_service.py @@ -0,0 +1,395 @@ +"""Shared SDLC KPI computation (used by FastAPI routes). + +The standalone ``sdlc-report`` script (``scripts/sdlc_report.py``) mirrors this +logic without importing ``github_pm``; keep behavior aligned when changing +metrics. 
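+
+All slice windows below are half-open ``(start, end]`` in UTC; see the
+``_filter_merged_in_slice`` / ``_filter_created_in_slice`` helpers.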
+""" + +from __future__ import annotations + +from datetime import datetime, timedelta +from typing import Any + +from github_pm import sdlc_metrics as sm +from github_pm.api import Connector, VERSION_MATCH +from github_pm.context import Settings +from github_pm.logger import logger +from github_pm.sdlc_models import ( + BugBacklogResponse, + BugBacklogSeriesResponse, + CycleTimePayload, + DeliveryResponse, + DeliverySeriesResponse, + EscapedDefectResponse, + EscapedDefectRow, + EscapedDefectSeriesResponse, + FirstReviewPayload, + ThroughputBreakdown, +) + + +def _post_graphql(gitctx: Connector): + return lambda payload: gitctx.post("/graphql", payload) + + +def _github_repo(gitctx: Connector) -> str: + return f"{gitctx.owner}/{gitctx.repo}" + + +def _filter_merged_in_slice( + rows: list[dict[str, Any]], + window_start: datetime, + window_end: datetime, +) -> list[dict[str, Any]]: + """Half-open on the left: merged in (window_start, window_end].""" + out: list[dict[str, Any]] = [] + for r in rows: + m = r.get("merged_at") + if not m: + continue + if window_start < m <= window_end: + out.append(r) + return out + + +def _filter_created_in_slice( + rows: list[dict[str, Any]], + window_start: datetime, + window_end: datetime, +) -> list[dict[str, Any]]: + """Half-open on the left: created in (window_start, window_end].""" + out: list[dict[str, Any]] = [] + for r in rows: + c = r.get("created_at") + if not c: + continue + if window_start < c <= window_end: + out.append(r) + return out + + +def compute_sdlc_delivery( + gitctx: Connector, + settings: Settings, + *, + days: int = 7, + now: datetime | None = None, +) -> DeliveryResponse: + """Delivery metrics (throughput, cycle time, time to first human review). + + Window is ``(now - days, now]`` (UTC): merged and created timestamps strictly after + ``now - days`` and on or before ``now``. 
+ """ + repo = _github_repo(gitctx) + window_end = now if now is not None else sm.utc_now() + window_start = window_end - timedelta(days=days) + + merged_q = sm.merged_prs_query(repo, window_start) + merged_nodes = sm.graphql_search_pull_requests(_post_graphql(gitctx), merged_q) + merged_rows = [sm.pr_row_from_graphql_node(n, settings) for n in merged_nodes] + merged_rows = _filter_merged_in_slice(merged_rows, window_start, window_end) + + throughput = sm.aggregate_throughput(merged_rows) + cycle = sm.build_median_cycle_payload(merged_rows) + + opened_q = sm.opened_prs_query(repo, window_start) + opened_nodes = sm.graphql_search_pull_requests(_post_graphql(gitctx), opened_q) + opened_rows = [sm.pr_row_from_graphql_node(n, settings) for n in opened_nodes] + opened_rows = _filter_created_in_slice(opened_rows, window_start, window_end) + + rows_with_review: list[tuple[dict[str, Any], float]] = [] + for r in opened_rows: + num = r["number"] + try: + raw = gitctx.get( + f"/repos/{gitctx.owner}/{gitctx.repo}/pulls/{num}/reviews", + headers={"Accept": "application/vnd.github+json"}, + ) + except Exception as e: + logger.warning("Failed to fetch reviews for PR %s: %s", num, e) + continue + if not isinstance(raw, list): + logger.warning( + "Reviews for PR %s: expected JSON array, got %s", num, type(raw) + ) + continue + rev_list: list[dict[str, Any]] = raw + first_at = sm.first_human_review_submitted_at(rev_list) + created = r.get("created_at") + if first_at and created: + secs = max(0.0, (first_at - created).total_seconds()) + rows_with_review.append((r, secs)) + + review_payload = sm.build_first_review_payload( + rows_with_review, eligible_count=len(opened_rows) + ) + + return DeliveryResponse( + window_days=days, + window_start=window_start, + window_end=window_end, + as_of=window_end, + merged_pr_throughput=ThroughputBreakdown(**throughput), + median_pr_cycle_time=CycleTimePayload(**cycle), + median_time_to_first_review=FirstReviewPayload(**review_payload), + ) + + +def compute_sdlc_delivery_series( + gitctx: Connector, + settings: Settings, + *, + weeks: int, + week_days: int = 7, +) -> DeliverySeriesResponse: + """One delivery slice per week, oldest slice first.""" + now = sm.utc_now() + slices: list[DeliveryResponse] = [] + for i in range(weeks): + window_end = now - timedelta(days=i * week_days) + slices.append( + compute_sdlc_delivery(gitctx, settings, days=week_days, now=window_end) + ) + slices.reverse() + return DeliverySeriesResponse(weeks=weeks, week_days=week_days, slices=slices) + + +def compute_escaped_defect_rate( + gitctx: Connector, + settings: Settings, +) -> EscapedDefectResponse: + now = sm.utc_now() + repo = _github_repo(gitctx) + open_milestones = gitctx.get_paged( + f"/repos/{repo}/milestones?state=open", + headers={"Accept": "application/vnd.github+json"}, + ) + closed_milestones = gitctx.get_paged( + f"/repos/{repo}/milestones?state=closed", + headers={"Accept": "application/vnd.github+json"}, + ) + targets = sm.select_escaped_defect_milestones( + open_milestones, + closed_milestones, + version_match=VERSION_MATCH, + ) + issue_milestone_to_prior = sm.build_semver_milestone_previous_map( + open_milestones, + closed_milestones, + version_match=VERSION_MATCH, + ) + escape_q = sm.escape_labeled_issues_query(repo, settings.sdlc_escape_label) + escape_items = sm.rest_search_issue_items_paginated( + lambda path: gitctx.get(path), + escape_q, + ) + escapes_by_release = sm.count_escape_issues_by_prior_milestone( + escape_items, + issue_milestone_to_prior, + 
version_match=VERSION_MATCH, + ) + releases: list[EscapedDefectRow] = [] + post = _post_graphql(gitctx) + + for title, is_next_open in targets: + q = sm.milestone_merged_prs_query(repo, title) + nodes = sm.graphql_search_pull_requests(post, q) + rows = [sm.pr_row_from_graphql_node(n, settings) for n in nodes] + feat = sum(1 for r in rows if r["pr_type"] == "feature") + bugs = sum(1 for r in rows if r["pr_type"] == "bug_fix") + docs = sum(1 for r in rows if r["pr_type"] == "docs") + denom = feat + bugs + docs + esc = int(escapes_by_release.get(title, 0)) + rate = (esc / denom) if denom else None + releases.append( + EscapedDefectRow( + release=title, + feature_prs=feat, + bug_fix_prs=bugs, + docs_prs=docs, + escape_issues=esc, + rate=rate, + is_next_open=is_next_open, + ) + ) + + return EscapedDefectResponse(as_of=now, releases=releases) + + +def compute_escaped_defect_rate_series( + gitctx: Connector, + settings: Settings, + *, + weeks: int, + week_days: int = 7, +) -> EscapedDefectSeriesResponse: + """Incremental escaped defect stats per week (oldest slice first). + + Milestone selection uses **current** open/closed milestones. Denominators count + non-bot PRs merged into each target milestone with ``merged_at`` in the slice; + numerators count escape-labeled issues **created** in the slice whose milestone + maps to that row via ``issue_milestone_to_prior`` (same attribution as the + cumulative endpoint). + """ + now = sm.utc_now() + repo = _github_repo(gitctx) + open_milestones = gitctx.get_paged( + f"/repos/{repo}/milestones?state=open", + headers={"Accept": "application/vnd.github+json"}, + ) + closed_milestones = gitctx.get_paged( + f"/repos/{repo}/milestones?state=closed", + headers={"Accept": "application/vnd.github+json"}, + ) + targets = sm.select_escaped_defect_milestones( + open_milestones, + closed_milestones, + version_match=VERSION_MATCH, + ) + issue_milestone_to_prior = sm.build_semver_milestone_previous_map( + open_milestones, + closed_milestones, + version_match=VERSION_MATCH, + ) + escape_q = sm.escape_labeled_issues_query(repo, settings.sdlc_escape_label) + escape_items = sm.rest_search_issue_items_paginated( + lambda path: gitctx.get(path), + escape_q, + ) + post = _post_graphql(gitctx) + rows_by_title: dict[str, list[dict[str, Any]]] = {} + for title, _is_next_open in targets: + q = sm.milestone_merged_prs_query(repo, title) + nodes = sm.graphql_search_pull_requests(post, q) + rows_by_title[title] = [sm.pr_row_from_graphql_node(n, settings) for n in nodes] + + slices: list[EscapedDefectResponse] = [] + for i in range(weeks): + window_end = now - timedelta(days=i * week_days) + window_start = window_end - timedelta(days=week_days) + releases: list[EscapedDefectRow] = [] + for title, is_next_open in targets: + rows = rows_by_title[title] + in_win = [ + r + for r in rows + if r.get("merged_at") and window_start < r["merged_at"] <= window_end + ] + feat = sum(1 for r in in_win if r["pr_type"] == "feature") + bugs = sum(1 for r in in_win if r["pr_type"] == "bug_fix") + docs = sum(1 for r in in_win if r["pr_type"] == "docs") + denom = feat + bugs + docs + esc = 0 + for item in escape_items: + ts = sm.parse_github_ts(item.get("created_at")) + if ts is None or not (window_start < ts <= window_end): + continue + ms = item.get("milestone") + if not isinstance(ms, dict): + continue + mt = str(ms.get("title") or "") + if not VERSION_MATCH.match(mt): + continue + prior = issue_milestone_to_prior.get(mt) + if prior != title: + continue + esc += 1 + rate = (esc / denom) if denom else 
None + releases.append( + EscapedDefectRow( + release=title, + feature_prs=feat, + bug_fix_prs=bugs, + docs_prs=docs, + escape_issues=esc, + rate=rate, + is_next_open=is_next_open, + ) + ) + slices.append( + EscapedDefectResponse( + window_start=window_start, + window_end=window_end, + as_of=window_end, + releases=releases, + ) + ) + slices.reverse() + return EscapedDefectSeriesResponse(weeks=weeks, week_days=week_days, slices=slices) + + +def _count_bug_issues_in_slice( + gitctx: Connector, + repo: str, + labels: str, + window_start: datetime, + window_end: datetime, + *, + opened: bool, +) -> int: + """Count bug issues opened or closed in ``(window_start, window_end]`` (UTC).""" + q = ( + sm.bug_issues_created_query_between(repo, labels, window_start, window_end) + if opened + else sm.bug_issues_closed_query_between(repo, labels, window_start, window_end) + ) + items = sm.rest_search_issue_items_paginated( + lambda path: gitctx.get(path), + q, + ) + key = "created_at" if opened else "closed_at" + n = 0 + for item in items: + raw = item.get(key) + ts = sm.parse_github_ts(raw) if raw else None + if ts is not None and window_start < ts <= window_end: + n += 1 + return n + + +def compute_bug_backlog_delta( + gitctx: Connector, + settings: Settings, + *, + days: int = 7, + now: datetime | None = None, +) -> BugBacklogResponse: + window_end = now if now is not None else sm.utc_now() + window_start = window_end - timedelta(days=days) + repo = _github_repo(gitctx) + labels = settings.sdlc_bug_labels + + opened = _count_bug_issues_in_slice( + gitctx, repo, labels, window_start, window_end, opened=True + ) + closed = _count_bug_issues_in_slice( + gitctx, repo, labels, window_start, window_end, opened=False + ) + + return BugBacklogResponse( + window_days=days, + window_start=window_start, + window_end=window_end, + as_of=window_end, + bugs_opened=opened, + bugs_closed=closed, + net=opened - closed, + ) + + +def compute_bug_backlog_delta_series( + gitctx: Connector, + settings: Settings, + *, + weeks: int, + week_days: int = 7, +) -> BugBacklogSeriesResponse: + now = sm.utc_now() + slices: list[BugBacklogResponse] = [] + for i in range(weeks): + window_end = now - timedelta(days=i * week_days) + slices.append( + compute_bug_backlog_delta(gitctx, settings, days=week_days, now=window_end) + ) + slices.reverse() + return BugBacklogSeriesResponse(weeks=weeks, week_days=week_days, slices=slices) diff --git a/backend/tests/test_sdlc_api.py b/backend/tests/test_sdlc_api.py new file mode 100644 index 0000000..79fb834 --- /dev/null +++ b/backend/tests/test_sdlc_api.py @@ -0,0 +1,266 @@ +"""Tests for SDLC KPI API routes (mocked GitHub).""" + +from datetime import datetime, UTC +from unittest.mock import MagicMock, patch + +from fastapi.testclient import TestClient +import pytest + +from github_pm.api import connection +from github_pm.app import app + + +def _graphql_page(nodes: list, has_next: bool = False, cursor: str | None = "c1"): + return { + "data": { + "search": { + "pageInfo": { + "hasNextPage": has_next, + "endCursor": cursor if has_next else None, + }, + "nodes": nodes, + } + } + } + + +@pytest.fixture +def client(): + return TestClient(app) + + +@pytest.fixture +def mock_connector(): + gitctx = MagicMock() + gitctx.owner = "test" + gitctx.repo = "repo" + gitctx.base_url = "https://api.github.com" + + _human = {"__typename": "User", "login": "contributor"} + merged_node = { + "number": 10, + "createdAt": "2025-04-05T10:00:00Z", + "mergedAt": "2025-04-06T10:00:00Z", + "additions": 5, + "deletions": 
5, + "labels": {"nodes": [{"name": "enhancement"}]}, + "milestone": None, + "author": _human, + } + opened_node = { + "number": 11, + "createdAt": "2025-04-05T12:00:00Z", + "mergedAt": None, + "additions": 2, + "deletions": 2, + "labels": {"nodes": [{"name": "bug"}]}, + "milestone": None, + "author": _human, + } + + def post_side(path: str, payload: dict): + q = payload["variables"]["q"] + if "is:merged" in q: + return _graphql_page([merged_node]) + return _graphql_page([opened_node]) + + gitctx.post.side_effect = post_side + + def get_side(path: str, headers=None): + if "/pulls/11/reviews" in path: + return [ + { + "user": {"login": "rev", "type": "User"}, + "submitted_at": "2025-04-05T14:00:00Z", + } + ] + if "/search/issues" in path: + return {"total_count": 3} + raise AssertionError(f"unexpected GET {path}") + + gitctx.get.side_effect = get_side + return gitctx + + +class TestSdlcDelivery: + def test_delivery_ok(self, client, mock_connector): + fixed = datetime(2025, 4, 10, 12, 0, 0, tzinfo=UTC) + + async def override_conn(): + yield mock_connector + + with ( + patch("github_pm.sdlc_metrics.utc_now", return_value=fixed), + patch("github_pm.sdlc_api.context") as ctx, + ): + ctx.github_repo = "test/repo" + ctx.sdlc_feature_labels = "enhancement" + ctx.sdlc_bug_labels = "bug" + ctx.sdlc_docs_labels = "documentation" + app.dependency_overrides[connection] = override_conn + try: + r = client.get("/api/v1/sdlc/delivery?weeks=1&week_days=7") + finally: + app.dependency_overrides.clear() + + assert r.status_code == 200 + data = r.json() + assert data["weeks"] == 1 + assert data["week_days"] == 7 + assert len(data["slices"]) == 1 + s0 = data["slices"][0] + assert s0["window_days"] == 7 + assert s0["merged_pr_throughput"]["total"] == 1 + assert s0["median_pr_cycle_time"]["pr_count"] == 1 + assert s0["median_time_to_first_review"]["eligible_pr_count"] == 1 + assert s0["median_time_to_first_review"]["included_pr_count"] == 1 + + +class TestEscapedDefect: + def test_escaped_defect_rate(self, client, mock_connector): + fixed = datetime(2025, 4, 10, 12, 0, 0, tzinfo=UTC) + _human = {"__typename": "User", "login": "contributor"} + feat = { + "number": 1, + "createdAt": "2025-04-01T10:00:00Z", + "mergedAt": "2025-04-08T10:00:00Z", + "additions": 1, + "deletions": 1, + "labels": {"nodes": [{"name": "enhancement"}]}, + "milestone": {"title": "v1.0.0"}, + "author": _human, + } + bug = { + "number": 2, + "createdAt": "2025-04-01T10:00:00Z", + "mergedAt": "2025-04-08T11:00:00Z", + "additions": 1, + "deletions": 1, + "labels": {"nodes": [{"name": "bug"}]}, + "milestone": {"title": "v1.0.0"}, + "author": _human, + } + doc_pr = { + "number": 3, + "createdAt": "2025-04-01T10:00:00Z", + "mergedAt": "2025-04-08T12:00:00Z", + "additions": 1, + "deletions": 1, + "labels": {"nodes": [{"name": "documentation"}]}, + "milestone": {"title": "v1.0.0"}, + "author": _human, + } + + mock_connector.post.side_effect = lambda path, payload: _graphql_page( + [feat, bug, doc_pr] + ) + # Next open v0.7.0 → include v0.5.1 + v0.6.0 closed + v0.7.0 open (sorted) + mock_connector.get_paged.side_effect = [ + [{"title": "v0.7.0", "state": "open", "number": 3}], + [ + {"title": "v0.5.0", "closed_at": "2025-01-01T00:00:00Z"}, + {"title": "v0.5.1", "closed_at": "2025-02-01T00:00:00Z"}, + {"title": "v0.6.0", "closed_at": "2025-03-01T00:00:00Z"}, + ], + ] + + def get_side(path: str, headers=None): + if "/search/issues" in path: + # Escape issue on v0.6.0 counts toward previous milestone v0.5.1 + return { + "items": [ + { + "milestone": 
{"title": "v0.6.0"}, + "created_at": "2025-04-06T10:00:00Z", + } + ], + "total_count": 1, + } + raise AssertionError(path) + + mock_connector.get.side_effect = get_side + + async def override_conn(): + yield mock_connector + + with ( + patch("github_pm.sdlc_metrics.utc_now", return_value=fixed), + patch("github_pm.sdlc_api.context") as ctx, + ): + ctx.github_repo = "test/repo" + ctx.sdlc_feature_labels = "enhancement" + ctx.sdlc_bug_labels = "bug" + ctx.sdlc_docs_labels = "documentation" + ctx.sdlc_escape_label = "escape" + app.dependency_overrides[connection] = override_conn + try: + r = client.get("/api/v1/sdlc/escaped-defect-rate?weeks=1&week_days=7") + finally: + app.dependency_overrides.clear() + + assert r.status_code == 200 + data = r.json() + assert data["weeks"] == 1 + assert len(data["slices"]) == 1 + slice0 = data["slices"][0] + assert "as_of" in slice0 + body = slice0["releases"] + assert len(body) == 3 + assert [r["release"] for r in body] == ["v0.5.1", "v0.6.0", "v0.7.0"] + assert [r["is_next_open"] for r in body] == [False, False, True] + assert body[0]["escape_issues"] == 1 + assert body[0]["docs_prs"] == 1 + assert body[0]["rate"] == pytest.approx(1.0 / 3.0) + assert body[1]["escape_issues"] == 0 + assert body[1]["docs_prs"] == 1 + assert body[1]["rate"] == pytest.approx(0.0) + assert body[2]["feature_prs"] == 1 + assert body[2]["bug_fix_prs"] == 1 + assert body[2]["docs_prs"] == 1 + assert body[2]["escape_issues"] == 0 + assert body[2]["rate"] == pytest.approx(0.0) + + +class TestBugBacklog: + def test_bug_backlog_delta(self, client, mock_connector): + opened_items = [ + {"created_at": "2025-04-05T10:00:00Z", "closed_at": None} for _ in range(4) + ] + closed_items = [ + {"created_at": "2025-03-01T00:00:00Z", "closed_at": "2025-04-06T10:00:00Z"} + ] + + def get_side(path: str, headers=None): + if "/search/issues" in path: + # q= is URL-encoded (e.g. 
is:closed -> is%3Aclosed) + if "is%3Aclosed" in path or "is:closed" in path: + return {"items": closed_items, "total_count": len(closed_items)} + return {"items": opened_items, "total_count": len(opened_items)} + raise AssertionError(path) + + mock_connector.get.side_effect = get_side + + async def override_conn(): + yield mock_connector + + fixed = datetime(2025, 4, 10, 12, 0, 0, tzinfo=UTC) + with ( + patch("github_pm.sdlc_metrics.utc_now", return_value=fixed), + patch("github_pm.sdlc_api.context") as ctx, + ): + ctx.github_repo = "test/repo" + ctx.sdlc_bug_labels = "bug" + app.dependency_overrides[connection] = override_conn + try: + r = client.get("/api/v1/sdlc/bug-backlog-delta?weeks=1&week_days=7") + finally: + app.dependency_overrides.clear() + + assert r.status_code == 200 + d = r.json() + assert d["weeks"] == 1 + assert len(d["slices"]) == 1 + s0 = d["slices"][0] + assert s0["bugs_opened"] == 4 + assert s0["bugs_closed"] == 1 + assert s0["net"] == 3 diff --git a/backend/tests/test_sdlc_metrics.py b/backend/tests/test_sdlc_metrics.py new file mode 100644 index 0000000..2d821b4 --- /dev/null +++ b/backend/tests/test_sdlc_metrics.py @@ -0,0 +1,186 @@ +"""Unit tests for sdlc_metrics helpers.""" + +from datetime import datetime, UTC +import re + +import pytest + +from github_pm import sdlc_metrics as sm +from github_pm.context import Settings + +_SEMVER = re.compile(r"^v\d+\.\d+\.\d+$") + + +@pytest.fixture +def settings() -> Settings: + return Settings( + app_name="t", + github_repo="o/r", + github_token="", + sdlc_feature_labels="enhancement", + sdlc_bug_labels="bug", + sdlc_docs_labels="documentation", + sdlc_escape_label="escape", + ) + + +def test_classify_precedence(settings: Settings): + assert sm.classify_pr_type(["bug", "documentation"], settings) == "bug_fix" + assert sm.classify_pr_type(["documentation", "enhancement"], settings) == "docs" + assert sm.classify_pr_type(["enhancement"], settings) == "feature" + assert sm.classify_pr_type([], settings) == "unclassified" + + +def test_size_bucket(): + assert sm.size_bucket_from_lines(0) == "tiny" + assert sm.size_bucket_from_lines(10) == "tiny" + assert sm.size_bucket_from_lines(11) == "small" + assert sm.size_bucket_from_lines(100) == "small" + assert sm.size_bucket_from_lines(101) == "medium" + assert sm.size_bucket_from_lines(500) == "medium" + assert sm.size_bucket_from_lines(501) == "large" + + +def test_median_seconds(): + assert sm.median_seconds([]) is None + assert sm.median_seconds([2.0]) == 2.0 + assert sm.median_seconds([1.0, 2.0, 3.0]) == 2.0 + assert sm.median_seconds([1.0, 2.0, 3.0, 4.0]) == 2.5 + + +def test_is_pr_author_bot(): + assert sm.is_pr_author_bot({"__typename": "Bot", "login": "dependabot[bot]"}) + assert sm.is_pr_author_bot({"__typename": "User", "login": "dependabot[bot]"}) + assert sm.is_pr_author_bot({"__typename": "User", "login": "mergify[bot]"}) + assert sm.is_pr_author_bot({"__typename": "User", "login": "mergify-test"}) + assert not sm.is_pr_author_bot({"__typename": "User", "login": "human"}) + assert not sm.is_pr_author_bot(None) + + +def test_filter_out_bot_pr_nodes(): + human = {"__typename": "User", "login": "alice"} + bot = {"__typename": "User", "login": "dependabot[bot]"} + nodes = [ + {"number": 1, "author": human}, + {"number": 2, "author": bot}, + {"number": 3, "author": human}, + ] + assert [n["number"] for n in sm.filter_out_bot_pr_nodes(nodes)] == [1, 3] + + +def test_first_human_review_skips_bot(): + reviews = [ + { + "user": {"login": "copilot-pull-request-reviewer[bot]", 
"type": "Bot"}, + "submitted_at": "2025-01-01T10:00:00Z", + }, + { + "user": {"login": "human", "type": "User"}, + "submitted_at": "2025-01-01T11:00:00Z", + }, + ] + t = sm.first_human_review_submitted_at(reviews) + assert t == datetime(2025, 1, 1, 11, 0, tzinfo=UTC) + + +def test_aggregate_throughput_and_cycle(): + c = datetime(2025, 4, 1, 12, 0, tzinfo=UTC) + m = datetime(2025, 4, 2, 12, 0, tzinfo=UTC) + rows = [ + { + "pr_type": "feature", + "size_bucket": "tiny", + "created_at": c, + "merged_at": m, + }, + { + "pr_type": "bug_fix", + "size_bucket": "large", + "created_at": c, + "merged_at": m, + }, + ] + agg = sm.aggregate_throughput(rows) + assert agg["total"] == 2 + assert agg["by_pr_type"]["feature"] == 1 + assert agg["by_pr_type"]["bug_fix"] == 1 + + cycle = sm.build_median_cycle_payload(rows) + assert cycle["pr_count"] == 2 + assert cycle["median_seconds"] == 86400.0 + + +def test_label_or_clause(): + assert "label:bug" in sm._label_or_clause(frozenset({"bug"})) + assert "OR" in sm._label_or_clause(frozenset({"a", "b"})) + + +def test_milestone_query_escapes_quotes(): + q = sm.milestone_merged_prs_query("o/r", 'v1.0.0"') + assert '\\"' in q or "v1.0.0" in q + + +def test_select_escaped_defect_milestones_three_lines_sorted(): + # Next open v0.7.0 → lines (0,5), (0,6), (0,7); pick latest closed on 0.5 / 0.6 + open_ms = [{"title": "v0.7.0"}] + closed_ms = [ + {"title": "v0.5.0", "closed_at": "2025-01-01T00:00:00Z"}, + {"title": "v0.5.1", "closed_at": "2025-02-01T00:00:00Z"}, + {"title": "v0.6.0", "closed_at": "2025-03-01T00:00:00Z"}, + ] + out = sm.select_escaped_defect_milestones( + open_ms, + closed_ms, + version_match=_SEMVER, + ) + assert out == [ + ("v0.5.1", False), + ("v0.6.0", False), + ("v0.7.0", True), + ] + + +def test_select_escaped_defect_milestones_skips_missing_closed_line(): + open_ms = [{"title": "v0.7.0"}] + closed_ms = [{"title": "v0.6.0", "closed_at": "2025-03-01T00:00:00Z"}] + out = sm.select_escaped_defect_milestones( + open_ms, + closed_ms, + version_match=_SEMVER, + ) + # No v0.5.x closed → only v0.6.0 and v0.7.0 + assert out == [("v0.6.0", False), ("v0.7.0", True)] + + +def test_select_escaped_defect_milestones_empty_when_no_open_semver(): + assert sm.select_escaped_defect_milestones([], [], version_match=_SEMVER) == [] + + +def test_build_semver_milestone_previous_map(): + open_m = [{"title": "v2.0.1"}] + closed_m = [{"title": "v1.0.0"}, {"title": "v2.0.0"}] + prev = sm.build_semver_milestone_previous_map( + open_m, closed_m, version_match=_SEMVER + ) + assert prev == {"v2.0.0": "v1.0.0", "v2.0.1": "v2.0.0"} + assert "v1.0.0" not in prev + + +def test_escape_labeled_issues_query(): + q = sm.escape_labeled_issues_query("acme/rocket", "escape") + assert "repo:acme/rocket" in q + assert "is:issue" in q + assert "label:escape" in q + + +def test_count_escape_issues_by_prior_milestone(): + prev = {"v2.0.0": "v1.0.0", "v2.0.1": "v2.0.0"} + items = [ + {"milestone": {"title": "v2.0.0"}}, + {"milestone": {"title": "v2.0.0"}}, + {"milestone": {"title": "v2.0.1"}}, + {"milestone": None}, + {"milestone": {"title": "backlog"}}, + ] + c = sm.count_escape_issues_by_prior_milestone(items, prev, version_match=_SEMVER) + assert c == {"v1.0.0": 2, "v2.0.0": 1} diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 45314a7..455461d 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -8,6 +8,9 @@ import { Alert, Bullseye, Button, + Tabs, + Tab, + TabTitleText, } from '@patternfly/react-core'; import { fetchMilestones, @@ -16,6 +19,7 @@ import { 
fetchAssignees, } from './services/api'; import MilestoneCard from './components/MilestoneCard'; +import SdlcKpisPanel from './components/SdlcKpisPanel'; import ManageMilestones from './components/ManageMilestones'; import ManageLabels from './components/ManageLabels'; import ManageSort from './components/ManageSort'; @@ -25,6 +29,9 @@ import assigneesCache from './utils/assigneesCache'; import iconImage from './assets/icon.png'; import './icon.css'; +const MAIN_VIEW_TAB_STORAGE_KEY = 'pmStatsMainViewTab'; +const VALID_MAIN_VIEW_TABS = new Set(['planning', 'sdlc']); + const App = () => { // Initialize with cached data if available const [milestones, setMilestones] = useState(milestonesCache.data || []); @@ -35,6 +42,17 @@ const App = () => { const [isManageMilestonesOpen, setIsManageMilestonesOpen] = useState(false); const [isManageLabelsOpen, setIsManageLabelsOpen] = useState(false); const [isManageSortOpen, setIsManageSortOpen] = useState(false); + const [activeViewTab, setActiveViewTab] = useState(() => { + try { + const saved = localStorage.getItem(MAIN_VIEW_TAB_STORAGE_KEY); + if (saved && VALID_MAIN_VIEW_TABS.has(saved)) { + return saved; + } + } catch (error) { + console.error('Failed to load main view tab from localStorage:', error); + } + return 'planning'; + }); // Load sort order from localStorage on mount const [sortOrder, setSortOrder] = useState(() => { @@ -166,6 +184,14 @@ const App = () => { } }, [sortOrder]); + useEffect(() => { + try { + localStorage.setItem(MAIN_VIEW_TAB_STORAGE_KEY, activeViewTab); + } catch (error) { + console.error('Failed to save main view tab to localStorage:', error); + } + }, [activeViewTab]); + const loadMilestones = () => { setLoading(true); setError(null); @@ -279,41 +305,59 @@ const App = () => { - - - {loading && ( - - - - )} - - {error && ( - - {error} - - )} + setActiveViewTab(key)} + aria-label="Main views" + mountOnEnter + style={{ marginTop: '1rem' }} + > + Planning} + > + {loading && ( + + + + )} - {!loading && !error && milestones.length === 0 && ( - - There are no milestones available. - - )} + {error && ( + + {error} + + )} - {!loading && !error && milestones.length > 0 && ( -
- {milestones.map((milestone) => ( - - ))} -
- )} + {!loading && !error && ( +
+ {milestones.length === 0 && ( + + There are no milestones available. + + )} + {milestones.length > 0 && + milestones.map((milestone) => ( + + ))} +
+ )} +
+ SDLC}> + + +
{ }); }); + const mockSdlcSeriesResponses = () => { + api.fetchSdlcDelivery.mockResolvedValue({ + weeks: 1, + week_days: 7, + slices: [ + { + window_days: 7, + window_start: '2025-04-03T12:00:00Z', + window_end: '2025-04-10T12:00:00Z', + as_of: '2025-04-10T12:00:00Z', + merged_pr_throughput: { + total: 0, + by_pr_type: { feature: 0, bug_fix: 0, docs: 0, unclassified: 0 }, + by_pr_size: { tiny: 0, small: 0, medium: 0, large: 0, unknown: 0 }, + }, + median_pr_cycle_time: { + median_seconds: null, + by_pr_type: {}, + by_pr_size: {}, + pr_count: 0, + }, + median_time_to_first_review: { + median_seconds: null, + by_pr_type: {}, + by_pr_size: {}, + included_pr_count: 0, + eligible_pr_count: 0, + }, + }, + ], + }); + api.fetchEscapedDefectRate.mockResolvedValue({ + weeks: 1, + week_days: 7, + slices: [ + { + window_start: '2025-04-03T12:00:00Z', + window_end: '2025-04-10T12:00:00Z', + as_of: '2025-04-10T12:00:00Z', + releases: [], + }, + ], + }); + api.fetchBugBacklogDelta.mockResolvedValue({ + weeks: 1, + week_days: 7, + slices: [ + { + window_days: 7, + window_start: '2025-04-03T12:00:00Z', + window_end: '2025-04-10T12:00:00Z', + as_of: '2025-04-10T12:00:00Z', + bugs_opened: 0, + bugs_closed: 0, + net: 0, + }, + ], + }); + }; + + it('restores main view tab from localStorage', async () => { + store.pmStatsMainViewTab = 'sdlc'; + api.fetchMilestones.mockResolvedValue([]); + mockSdlcSeriesResponses(); + + await act(async () => { + render(); + }); + + await waitFor(() => { + expect(screen.getByRole('tab', { name: /^SDLC$/i })).toHaveAttribute( + 'aria-selected', + 'true' + ); + }); + }); + + it('persists main view tab to localStorage when SDLC is selected', async () => { + const user = userEvent.setup(); + api.fetchMilestones.mockResolvedValue([]); + mockSdlcSeriesResponses(); + + await act(async () => { + render(); + }); + + await waitFor(() => { + expect(screen.getByRole('tab', { name: /^SDLC$/i })).toBeInTheDocument(); + }); + + await user.click(screen.getByRole('tab', { name: /^SDLC$/i })); + + await waitFor(() => { + expect(localStorageMock.setItem).toHaveBeenCalledWith( + 'pmStatsMainViewTab', + 'sdlc' + ); + }); + }); + it('loads sort order from localStorage on mount', async () => { const savedSortOrder = ['label1', 'label2', 'label3']; localStorageMock.setItem('issueSortOrder', JSON.stringify(savedSortOrder)); diff --git a/frontend/src/components/SdlcKpisPanel.jsx b/frontend/src/components/SdlcKpisPanel.jsx new file mode 100644 index 0000000..e3d5987 --- /dev/null +++ b/frontend/src/components/SdlcKpisPanel.jsx @@ -0,0 +1,648 @@ +// ai-generated: Cursor +import React, { useState, useCallback, useEffect } from 'react'; +import { + Card, + CardHeader, + CardTitle, + CardBody, + Spinner, + Alert, + Title, + Flex, + FlexItem, + TextInput, + Tooltip, +} from '@patternfly/react-core'; +import { + fetchSdlcDelivery, + fetchEscapedDefectRate, + fetchBugBacklogDelta, +} from '../services/api'; + +/** Format a duration as whole days and hours only (no minutes/seconds). 
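+ * Examples: formatDaysAndHours(90000) -> '1 d 1 h'; 1800 -> '< 1 h'; 0 -> '0 h'.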
+ */
+export const formatDaysAndHours = (sec) => {
+  if (sec == null || Number.isNaN(sec)) {
+    return '—';
+  }
+  if (sec <= 0) {
+    return '0 h';
+  }
+  const days = Math.floor(sec / 86400);
+  const hours = Math.floor((sec % 86400) / 3600);
+  const parts = [];
+  if (days > 0) {
+    parts.push(`${days} d`);
+  }
+  if (hours > 0) {
+    parts.push(`${hours} h`);
+  }
+  if (parts.length === 0) {
+    return '< 1 h';
+  }
+  return parts.join(' ');
+};
+
+const formatRate = (r) => {
+  if (r == null || Number.isNaN(r)) {
+    return '—';
+  }
+  return `${(r * 100).toFixed(1)}%`;
+};
+
+const formatWindowRange = (slice) => {
+  if (!slice?.window_start || !slice?.window_end) {
+    return '';
+  }
+  const a = new Date(slice.window_start).toLocaleString();
+  const b = new Date(slice.window_end).toLocaleString();
+  return `${a} → ${b}`;
+};
+
+const numericMax = (values) => {
+  const nums = values
+    .map((v) => (typeof v === 'number' && !Number.isNaN(v) ? v : null))
+    .filter((v) => v != null);
+  if (nums.length === 0) {
+    return 1;
+  }
+  const m = Math.max(...nums.map((n) => Math.abs(n)));
+  return m > 0 ? m : 1;
+};
+
+/**
+ * @param {object} props
+ * @param {Array<{ value: number | null, slice: object }>} props.bars
+ * @param {(v: number | null) => string} props.formatValue
+ * @param {(v: number | null, slice: object) => string} [props.formatHoverValue]
+ * @param {number} [props.chartHeightPx]
+ * @param {boolean} [props.compact] When true (default), bars stay grouped at fixed column width instead of stretching across the page.
+ * @param {number} [props.barColumnWidthPx] Width reserved per week column in compact mode.
+ */
+const WeekBarChart = ({
+  bars,
+  formatValue,
+  formatHoverValue,
+  chartHeightPx = 120,
+  compact = true,
+  barColumnWidthPx = 48,
+}) => {
+  const vals = bars.map((b) =>
+    b.value != null && !Number.isNaN(b.value) ? b.value : null
+  );
+  const maxV = numericMax(vals.filter((v) => v != null));
+  const barFillWidth = Math.max(28, Math.min(40, barColumnWidthPx - 8));
+
+  return (
+      {bars.map((b, i) => {
+        const v = vals[i];
+        const h =
+          v == null ? 0 : Math.max(4, (Math.abs(v) / maxV) * chartHeightPx);
+        const hoverMain =
+          formatHoverValue != null
+            ? formatHoverValue(v, b.slice)
+            : formatValue(v);
+        const tip = (
+
+
+            {hoverMain}
+
+            {formatWindowRange(b.slice)}
+
+
+        );
+        return (
+
+
+
+
+
+
+            {formatValue(v)}
+
+
+
+        );
+      })}
+
+  );
+};
+
+const breakdownBoxStyle = {
+  border: '1px solid var(--pf-v5-global--BorderColor--100, #d2d2d2)',
+  borderRadius: '4px',
+  padding: '0.75rem 0.85rem',
+  backgroundColor: 'var(--pf-v5-global--BackgroundColor--150, #f5f5f5)',
+  width: 'fit-content',
+  maxWidth: '100%',
+  flex: '0 1 auto',
+  minWidth: 'min(100%, 12rem)',
+};
+
+const BreakdownWeekCharts = ({
+  title,
+  slices,
+  byKey,
+  chartKey,
+  formatBarValue = (v) => (v == null ? '—' : String(v)),
+}) => (
+
+      {title}
+
+
+      {byKey.map((key) => (
+
+
+          {key}
+
+          <WeekBarChart
+            bars={slices.map((slice) => ({
+              slice,
+              value: chartKey(slice, key),
+            }))}
+            formatValue={formatBarValue}
+          />
+
+      ))}
+
+
+); + +const DEFAULT_WEEKS = 4; +const SDLC_WEEKS_STORAGE_KEY = 'pmStatsSdlcWeeks'; + +const readWeeksFromStorage = () => { + try { + const raw = localStorage.getItem(SDLC_WEEKS_STORAGE_KEY); + const n = parseInt(raw, 10); + if (Number.isFinite(n) && n >= 1 && n <= 52) { + return n; + } + } catch (error) { + console.error('Failed to read SDLC weeks from localStorage:', error); + } + return DEFAULT_WEEKS; +}; + +const SdlcKpisPanel = () => { + const [weeksInput, setWeeksInput] = useState(() => + String(readWeeksFromStorage()) + ); + const [weeks, setWeeks] = useState(() => readWeeksFromStorage()); + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + const [delivery, setDelivery] = useState(null); + const [escaped, setEscaped] = useState(null); + const [backlog, setBacklog] = useState(null); + + const load = useCallback(() => { + setLoading(true); + setError(null); + return Promise.all([ + fetchSdlcDelivery(weeks, 7), + fetchEscapedDefectRate(weeks, 7), + fetchBugBacklogDelta(weeks, 7), + ]) + .then(([d, e, b]) => { + setDelivery(d); + setEscaped(e); + setBacklog(b); + setLoading(false); + }) + .catch((err) => { + setError(err.message || String(err)); + setLoading(false); + }); + }, [weeks]); + + useEffect(() => { + load(); + }, [load]); + + useEffect(() => { + try { + localStorage.setItem(SDLC_WEEKS_STORAGE_KEY, String(weeks)); + } catch (error) { + console.error('Failed to save SDLC weeks to localStorage:', error); + } + }, [weeks]); + + const applyWeeksFromInput = () => { + const n = parseInt(weeksInput, 10); + if (Number.isFinite(n) && n >= 1 && n <= 52) { + setWeeks(n); + setWeeksInput(String(n)); + } else { + setWeeksInput(String(weeks)); + } + }; + + const deliverySlices = delivery?.slices ?? []; + const escapedSlices = escaped?.slices ?? []; + const backlogSlices = backlog?.slices ?? []; + + const throughputKeys = deliverySlices[0] + ? [...Object.keys(deliverySlices[0].merged_pr_throughput?.by_pr_type || {})] + : []; + const sizeKeys = deliverySlices[0] + ? [...Object.keys(deliverySlices[0].merged_pr_throughput?.by_pr_size || {})] + : []; + + return ( + + + + + SDLC KPIs + + + + + setWeeksInput(val)} + onBlur={applyWeeksFromInput} + onKeyDown={(e) => { + if (e.key === 'Enter') { + applyWeeksFromInput(); + } + }} + style={{ width: '4.5rem' }} + /> + + + + + + {loading && ( +
+
+
+          )}
+          {error && (
+
+              {error}
+
+          )}
+          {!loading &&
+            !error &&
+            delivery &&
+            escaped &&
+            backlog &&
+            deliverySlices.length > 0 && (
+
+
+
+                  Delivery (per {delivery.week_days || 7}-day window)
+
+
+                  {deliverySlices.length} week
+                  {deliverySlices.length === 1 ? '' : 's'} (oldest left → newest
+                  right). As of{' '}
+                  {new Date(
+                    deliverySlices[deliverySlices.length - 1]?.as_of
+                  ).toLocaleString()}
+
+ + + 1. Merged PR throughput (total) + + ({ + slice, + value: slice.merged_pr_throughput?.total ?? 0, + }))} + formatValue={(v) => String(v ?? 0)} + /> + + + slice.merged_pr_throughput?.by_pr_type?.[key] ?? 0 + } + /> + + slice.merged_pr_throughput?.by_pr_size?.[key] ?? 0 + } + /> + + + 2. Median PR cycle time (open → merge) + + ({ + slice, + value: slice.median_pr_cycle_time?.median_seconds, + }))} + formatValue={(v) => formatDaysAndHours(v)} + formatHoverValue={(v, slice) => + v == null + ? '—' + : `${formatDaysAndHours(v)} (${slice.median_pr_cycle_time?.pr_count ?? 0} PRs)` + } + /> + + slice.median_pr_cycle_time?.by_pr_type?.[key] ?? null + } + formatBarValue={(v) => formatDaysAndHours(v)} + /> + + slice.median_pr_cycle_time?.by_pr_size?.[key] ?? null + } + formatBarValue={(v) => formatDaysAndHours(v)} + /> + + + 3. Median time to first human review + +
+                Eligible / included PR counts vary by week (see tooltips).
+
+ ({ + slice, + value: slice.median_time_to_first_review?.median_seconds, + }))} + formatValue={(v) => formatDaysAndHours(v)} + formatHoverValue={(v, slice) => + v == null + ? '—' + : `${formatDaysAndHours(v)} (eligible ${slice.median_time_to_first_review?.eligible_pr_count ?? 0}, with review ${slice.median_time_to_first_review?.included_pr_count ?? 0})` + } + /> + + slice.median_time_to_first_review?.by_pr_type?.[key] ?? null + } + formatBarValue={(v) => formatDaysAndHours(v)} + /> + + slice.median_time_to_first_review?.by_pr_size?.[key] ?? null + } + formatBarValue={(v) => formatDaysAndHours(v)} + /> +
+
+
+            Quality
+
+
+                Milestone rows follow current repo semver selection. Each bar
+                counts escapes created and PRs{' '}
+                merged into the milestone within that week.
+                As of{' '}
+                {new Date(
+                  escapedSlices[escapedSlices.length - 1]?.as_of
+                ).toLocaleString()}
+                .
+
+              {(escapedSlices[0]?.releases || []).map((row, idx) => (
+
+
+                    {row.release}
+                    {row.is_next_open ? (
+                      <span style={{ color: '#6a6e73', fontWeight: 400 }}>
+                        {' '}
+                        (next open / pre-release)
+                      </span>
+                    ) : null}
+
+                  Rate (escapes ÷ feature+bug+docs PRs merged in window)
+
+                  <WeekBarChart
+                    bars={escapedSlices.map((slice) => {
+                      const r = slice.releases[idx];
+                      return {
+                        slice,
+                        value: r?.rate != null ? r.rate * 100 : null,
+                      };
+                    })}
+                    formatValue={(v) =>
+                      v == null ? '—' : `${Number(v).toFixed(1)}%`
+                    }
+                    formatHoverValue={(v, slice) => {
+                      const r = slice.releases[idx];
+                      if (!r) {
+                        return '—';
+                      }
+                      return `${formatRate(r.rate)} — ${r.escape_issues ?? 0} escape(s); features ${r.feature_prs}, bugs ${r.bug_fix_prs}, docs ${r.docs_prs ?? 0}`;
+                    }}
+                  />
+
+              ))}
+
+
+ + 5. Open bug backlog (opened / closed / net per week) + + + Opened + + ({ + slice, + value: slice.bugs_opened, + }))} + formatValue={(v) => String(v ?? 0)} + /> + + Closed + + ({ + slice, + value: slice.bugs_closed, + }))} + formatValue={(v) => String(v ?? 0)} + /> + + Net + + ({ + slice, + value: slice.net, + }))} + formatValue={(v) => String(v ?? 0)} + /> +
+
+        )}
+
+
+ ); +}; + +export default SdlcKpisPanel; diff --git a/frontend/src/components/SdlcKpisPanel.test.jsx b/frontend/src/components/SdlcKpisPanel.test.jsx new file mode 100644 index 0000000..d088849 --- /dev/null +++ b/frontend/src/components/SdlcKpisPanel.test.jsx @@ -0,0 +1,165 @@ +// ai-generated: Cursor +import React from 'react'; +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { render, screen, waitFor } from '@testing-library/react'; +import userEvent from '@testing-library/user-event'; +import SdlcKpisPanel, { formatDaysAndHours } from './SdlcKpisPanel'; + +vi.mock('../services/api', () => ({ + fetchSdlcDelivery: vi.fn(), + fetchEscapedDefectRate: vi.fn(), + fetchBugBacklogDelta: vi.fn(), +})); + +import { + fetchSdlcDelivery, + fetchEscapedDefectRate, + fetchBugBacklogDelta, +} from '../services/api'; + +describe('formatDaysAndHours', () => { + it('formats days and hours', () => { + expect(formatDaysAndHours(86400)).toBe('1 d'); + expect(formatDaysAndHours(90000)).toBe('1 d 1 h'); + expect(formatDaysAndHours(3600)).toBe('1 h'); + expect(formatDaysAndHours(7200)).toBe('2 h'); + }); + + it('uses < 1 h when under one hour', () => { + expect(formatDaysAndHours(600)).toBe('< 1 h'); + }); + + it('handles null and zero', () => { + expect(formatDaysAndHours(null)).toBe('—'); + expect(formatDaysAndHours(0)).toBe('0 h'); + }); +}); + +describe('SdlcKpisPanel', () => { + beforeEach(() => { + vi.clearAllMocks(); + localStorage.removeItem('pmStatsSdlcWeeks'); + }); + + const deliverySlice = { + window_days: 7, + window_start: '2025-04-03T12:00:00Z', + window_end: '2025-04-10T12:00:00Z', + as_of: '2025-04-10T12:00:00Z', + merged_pr_throughput: { + total: 2, + by_pr_type: { feature: 2, bug_fix: 0, docs: 0, unclassified: 0 }, + by_pr_size: { tiny: 2, small: 0, medium: 0, large: 0, unknown: 0 }, + }, + median_pr_cycle_time: { + median_seconds: 3600, + by_pr_type: { feature: 3600 }, + by_pr_size: { tiny: 3600 }, + pr_count: 2, + }, + median_time_to_first_review: { + median_seconds: 600, + by_pr_type: { feature: 600 }, + by_pr_size: { tiny: 600 }, + included_pr_count: 1, + eligible_pr_count: 1, + }, + }; + + const makeSeriesMocks = (sliceCount) => { + const slices = Array.from({ length: sliceCount }, () => ({ + ...deliverySlice, + })); + fetchSdlcDelivery.mockResolvedValue({ + weeks: sliceCount, + week_days: 7, + slices, + }); + const escapeSlice = { + window_start: '2025-04-03T12:00:00Z', + window_end: '2025-04-10T12:00:00Z', + as_of: '2025-04-10T12:00:00Z', + releases: [ + { + release: 'v1.0.0', + feature_prs: 1, + bug_fix_prs: 0, + docs_prs: 0, + escape_issues: 0, + rate: 0, + is_next_open: true, + }, + ], + }; + const backlogSlice = { + window_days: 7, + window_start: '2025-04-03T12:00:00Z', + window_end: '2025-04-10T12:00:00Z', + as_of: '2025-04-10T12:00:00Z', + bugs_opened: 1, + bugs_closed: 0, + net: 1, + }; + fetchEscapedDefectRate.mockResolvedValue({ + weeks: sliceCount, + week_days: 7, + slices: Array.from({ length: sliceCount }, () => ({ ...escapeSlice })), + }); + fetchBugBacklogDelta.mockResolvedValue({ + weeks: sliceCount, + week_days: 7, + slices: Array.from({ length: sliceCount }, () => ({ ...backlogSlice })), + }); + }; + + it('renders title and loads metrics on mount', async () => { + makeSeriesMocks(1); + + render(); + expect(screen.getByText('SDLC KPIs')).toBeInTheDocument(); + + await waitFor(() => { + expect(fetchSdlcDelivery).toHaveBeenCalledWith(4, 7); + expect(fetchEscapedDefectRate).toHaveBeenCalledWith(4, 7); + 
expect(fetchBugBacklogDelta).toHaveBeenCalledWith(4, 7); + }); + + await waitFor(() => { + expect(screen.getByText(/Merged PR throughput/i)).toBeInTheDocument(); + }); + }); + + it('loads week count from localStorage', async () => { + localStorage.setItem('pmStatsSdlcWeeks', '8'); + makeSeriesMocks(8); + + render(); + + await waitFor(() => { + expect(fetchSdlcDelivery).toHaveBeenCalledWith(8, 7); + expect(fetchEscapedDefectRate).toHaveBeenCalledWith(8, 7); + expect(fetchBugBacklogDelta).toHaveBeenCalledWith(8, 7); + }); + }); + + it('persists week count to localStorage when applied', async () => { + const user = userEvent.setup(); + makeSeriesMocks(4); + + render(); + + await waitFor(() => { + expect(screen.getByLabelText(/weeks/i)).toBeInTheDocument(); + }); + + const input = screen.getByLabelText(/weeks/i); + await user.clear(input); + await user.type(input, '6'); + await user.tab(); + + await waitFor(() => { + expect(localStorage.getItem('pmStatsSdlcWeeks')).toBe('6'); + }); + expect(fetchSdlcDelivery).toHaveBeenLastCalledWith(6, 7); + }); +}); diff --git a/frontend/src/services/api.js b/frontend/src/services/api.js index 992bed6..2017df7 100644 --- a/frontend/src/services/api.js +++ b/frontend/src/services/api.js @@ -199,3 +199,47 @@ export const removeIssueAssignees = async (issueNumber, assignees) => { } return response.json(); }; + +export const fetchSdlcDelivery = async (weeks = 4, weekDays = 7) => { + const params = new URLSearchParams({ + weeks: String(weeks), + week_days: String(weekDays), + }); + const response = await fetch(`${API_BASE}/sdlc/delivery?${params}`); + if (!response.ok) { + throw new Error( + `Failed to fetch SDLC delivery metrics: ${response.statusText}` + ); + } + return response.json(); +}; + +export const fetchEscapedDefectRate = async (weeks = 4, weekDays = 7) => { + const params = new URLSearchParams({ + weeks: String(weeks), + week_days: String(weekDays), + }); + const response = await fetch( + `${API_BASE}/sdlc/escaped-defect-rate?${params}` + ); + if (!response.ok) { + throw new Error( + `Failed to fetch escaped defect rate: ${response.statusText}` + ); + } + return response.json(); +}; + +export const fetchBugBacklogDelta = async (weeks = 4, weekDays = 7) => { + const params = new URLSearchParams({ + weeks: String(weeks), + week_days: String(weekDays), + }); + const response = await fetch(`${API_BASE}/sdlc/bug-backlog-delta?${params}`); + if (!response.ok) { + throw new Error( + `Failed to fetch bug backlog delta: ${response.statusText}` + ); + } + return response.json(); +}; diff --git a/frontend/src/services/api.test.js b/frontend/src/services/api.test.js index 8ba18ff..585d93a 100644 --- a/frontend/src/services/api.test.js +++ b/frontend/src/services/api.test.js @@ -5,6 +5,9 @@ import { fetchIssues, fetchComments, fetchProject, + fetchSdlcDelivery, + fetchEscapedDefectRate, + fetchBugBacklogDelta, } from './api'; describe('api', () => { @@ -114,4 +117,56 @@ describe('api', () => { await expect(fetchProject()).rejects.toThrow('Failed to fetch project'); }); }); + + describe('fetchSdlcDelivery', () => { + it('requests delivery metrics with default weeks', async () => { + global.fetch.mockResolvedValue({ + ok: true, + json: async () => ({ weeks: 4, week_days: 7, slices: [] }), + }); + const result = await fetchSdlcDelivery(); + expect(result.weeks).toBe(4); + expect(global.fetch).toHaveBeenCalledWith( + '/api/v1/sdlc/delivery?weeks=4&week_days=7' + ); + }); + + it('throws on failure', async () => { + global.fetch.mockResolvedValue({ ok: false, 
statusText: 'Bad Gateway' }); + await expect(fetchSdlcDelivery(8)).rejects.toThrow( + 'Failed to fetch SDLC delivery metrics' + ); + expect(global.fetch).toHaveBeenCalledWith( + '/api/v1/sdlc/delivery?weeks=8&week_days=7' + ); + }); + }); + + describe('fetchEscapedDefectRate', () => { + it('fetches escaped defect rate', async () => { + const body = { weeks: 4, week_days: 7, slices: [] }; + global.fetch.mockResolvedValue({ + ok: true, + json: async () => body, + }); + const result = await fetchEscapedDefectRate(); + expect(result).toEqual(body); + expect(global.fetch).toHaveBeenCalledWith( + '/api/v1/sdlc/escaped-defect-rate?weeks=4&week_days=7' + ); + }); + }); + + describe('fetchBugBacklogDelta', () => { + it('requests bug backlog delta', async () => { + global.fetch.mockResolvedValue({ + ok: true, + json: async () => ({ weeks: 4, slices: [] }), + }); + await fetchBugBacklogDelta(); + expect(global.fetch).toHaveBeenCalledWith( + '/api/v1/sdlc/bug-backlog-delta?weeks=4&week_days=7' + ); + }); + }); });
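Taken together, the three new `api.js` helpers are designed to be fetched in parallel, which is what `SdlcKpisPanel` does via `Promise.all`. A hedged usage sketch (standalone; the `loadSdlcSeries` name and the logging are illustrative, not part of this diff):

```js
// Sketch only: fetch the three SDLC series together, as the panel does.
// `loadSdlcSeries` is a hypothetical helper name, not part of this diff.
import {
  fetchSdlcDelivery,
  fetchEscapedDefectRate,
  fetchBugBacklogDelta,
} from './services/api';

export const loadSdlcSeries = async (weeks = 4, weekDays = 7) => {
  const [delivery, escaped, backlog] = await Promise.all([
    fetchSdlcDelivery(weeks, weekDays),
    fetchEscapedDefectRate(weeks, weekDays),
    fetchBugBacklogDelta(weeks, weekDays),
  ]);
  // Each response carries one slice per week, oldest first
  // (the panel renders them oldest left → newest right).
  const latest = delivery.slices[delivery.slices.length - 1];
  console.log(
    'Merged PRs in latest window:',
    latest?.merged_pr_throughput?.total ?? 0
  );
  return { delivery, escaped, backlog };
};
```

Each fetcher rejects with a `Failed to fetch …` error on a non-2xx response, so a single `.catch` around the `Promise.all`, as in the panel's `load()`, covers all three requests.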