Use migration history for template issue summaries

This commit is contained in:
Paul Huliganga 2026-04-23 10:01:24 -04:00
parent 447a89923a
commit c22d26bcf6
3 changed files with 165 additions and 0 deletions

View File

@@ -225,6 +225,20 @@ Single-page app in `web/static/`. No build step — plain HTML + ES modules.
CSS uses DocuSign 2024 brand design tokens defined in `css/tokens.css`.
### Template Issue Summary
The Templates and Issues & Warnings pages use `/api/templates/status`. A
template is shown as `Clean` only when all of these are empty:
- validation `blockers`
- validation `warnings`
- composition `field_issues`
On the web server, migration downloads are temporary. If no persistent
`downloads/` folder exists for re-analysis, `/api/templates/status` falls back
to the current browser session's `migration-output/.history.json` records so
field issues discovered during migration still appear in the Templates summary.
---
## Security Design

View File

@@ -286,3 +286,49 @@ def test_status_includes_field_issues_when_template_has_mapping_caveats(tmp_path
t = resp.json()["templates"][0]
assert t["analysis_status"] == "analyzed"
assert any(issue["code"] == "HIDE_ACTION" for issue in t["field_issues"])
@respx.mock
def test_status_uses_history_field_issues_when_download_is_not_persistent(tmp_path, monkeypatch):
    """Server-side temp downloads are gone after migration, so status falls back to history."""
    import json

    import web.routers.templates as templates_module

    # A prior migration recorded one skipped field and an operational warning.
    history_record = {
        "timestamp": "2026-04-23T12:00:00Z",
        "owner_session_id": "legacy",
        "adobe_template_id": "adobe-history",
        "adobe_template_name": "History Template",
        "warnings": ["Skipped: template already exists (overwrite_if_exists=false)"],
        "blockers": [],
        "field_issues": [
            {
                "code": "FIELD_TYPE_SKIPPED",
                "field_name": "Image 1",
                "message": "Image 1 was skipped",
                "severity": "warning",
            }
        ],
    }
    history_file = tmp_path / ".history.json"
    history_file.write_text(json.dumps([history_record]))
    monkeypatch.setattr(templates_module, "_HISTORY_FILE", str(history_file))

    # Adobe lists the template; DocuSign has no migrated copy yet.
    respx.get(f"{ADOBE_BASE}/libraryDocuments").mock(
        return_value=httpx.Response(200, json={
            "libraryDocumentList": [
                {"id": "adobe-history", "name": "History Template", "modifiedDate": "2026-04-10"},
            ]
        })
    )
    respx.get(f"{DS_BASE}/v2.1/accounts/{DS_ACCOUNT}/templates").mock(
        return_value=httpx.Response(200, json={"envelopeTemplates": []})
    )

    resp = client.get("/api/templates/status", cookies={_COOKIE_NAME: _adobe_session()})
    assert resp.status_code == 200

    template = resp.json()["templates"][0]
    # Field issues survive via history; the operational "already exists"
    # warning is filtered out so the template is not flagged as risky.
    assert template["analysis_status"] == "history"
    assert template["warnings"] == []
    assert template["field_issues"][0]["code"] == "FIELD_TYPE_SKIPPED"

View File

@@ -6,6 +6,8 @@ Computes per-template migration status for the side-by-side UI.
"""
import asyncio
import json
import os
from datetime import datetime, timezone
from pathlib import Path
import tempfile
@@ -21,6 +23,10 @@ from web.session import get_session
router = APIRouter()
_HISTORY_FILE = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "..", "migration-output", ".history.json"
))
def _require_adobe(session: dict) -> Optional[JSONResponse]:
if not session.get("adobe_access_token"):
@@ -162,6 +168,13 @@ async def template_status(request: Request):
status = "needs_update" if adobe_modified > ds_modified else "migrated"
analysis = _get_template_analysis(t.get("id", ""), name)
if not _has_analysis_issues(analysis):
history_analysis = _get_history_analysis(
t.get("id", ""),
name,
session.get("_session_id") or "legacy",
)
analysis = _merge_analysis(analysis, history_analysis)
results.append({
"adobe_id": t.get("id"),
@@ -235,6 +248,98 @@ def _find_downloaded_template(template_id: str, template_name: str) -> Path | None:
return next((c for c in candidates if c.is_dir()), None)
def _get_history_analysis(template_id: str, template_name: str, session_scope: str) -> dict:
    """
    Return the latest issue details captured during migration for this template.

    The production web migration flow downloads Adobe template data to a temp
    directory, so the Templates page may not have persistent local downloads to
    re-analyze. Migration history is the source of truth for issues discovered
    during an actual migration attempt.
    """
    result = {
        "blockers": [],
        "warnings": [],
        "field_issues": [],
        "status": "not_found",
    }

    def _matches(record: dict) -> bool:
        # Records are scoped per browser session; match by Adobe id or name.
        if record.get("owner_session_id", "legacy") != session_scope:
            return False
        return (
            record.get("adobe_template_id") == template_id
            or record.get("adobe_template_name") == template_name
        )

    # Newest record first so the most recent migration attempt wins.
    candidates = sorted(
        (record for record in _load_history() if _matches(record)),
        key=lambda record: record.get("timestamp", ""),
        reverse=True,
    )
    if not candidates:
        return result

    for record in candidates:
        blockers = record.get("blockers") or []
        warnings = _template_warnings(record.get("warnings") or [])
        field_issues = record.get("field_issues") or []
        if blockers or warnings or field_issues:
            result["blockers"] = blockers
            result["warnings"] = warnings
            result["field_issues"] = field_issues
            result["status"] = "history"
            return result

    # Records exist but none carried issues: migrated cleanly.
    result["status"] = "history_clean"
    return result
def _load_history() -> list:
    """
    Best-effort read of the migration history file.

    Returns the list of history records, or [] when the file is missing,
    unreadable, or malformed. Also returns [] when the file parses but is
    not a JSON array — callers iterate the result and call `.get()` on each
    record, so any non-list payload would crash status computation.
    """
    if not os.path.exists(_HISTORY_FILE):
        return []
    try:
        with open(_HISTORY_FILE, encoding="utf-8") as f:
            data = json.load(f)
    except (OSError, ValueError):
        # json.JSONDecodeError and UnicodeDecodeError are ValueError subclasses;
        # history is advisory, so stay best-effort instead of failing the request.
        return []
    return data if isinstance(data, list) else []
def _template_warnings(warnings: list[str]) -> list[str]:
"""Remove operational migration messages that should not make a template look risky."""
return [
warning for warning in warnings
if not str(warning).startswith("Skipped: template already exists")
]
def _has_analysis_issues(analysis: dict) -> bool:
return bool(analysis["blockers"] or analysis["warnings"] or analysis["field_issues"])
def _merge_analysis(primary: dict, fallback: dict) -> dict:
    """Combine fresh analysis with history fallback, deduplicating overlapping issues."""
    # Nothing useful in history: keep the primary analysis untouched.
    if fallback["status"] in ("not_found", "history_clean"):
        return primary

    # History status only fills in when there was no local download to analyze.
    merged_status = primary["status"]
    if merged_status == "not_downloaded":
        merged_status = fallback["status"]

    return {
        "blockers": _dedupe(primary["blockers"] + fallback["blockers"]),
        "warnings": _dedupe(primary["warnings"] + fallback["warnings"]),
        "field_issues": _dedupe_field_issues(
            primary["field_issues"] + fallback["field_issues"]
        ),
        "status": merged_status,
    }
def _dedupe_field_issues(items: list[dict]) -> list[dict]:
seen = set()
result = []
for item in items:
key = (
item.get("code"),
item.get("field_name"),
item.get("message"),
)
if key in seen:
continue
seen.add(key)
result.append(item)
return result
def _dedupe(items: list[str]) -> list[str]:
seen = set()
result = []