test: 467 -> 574 Tests (+107) — DDD, abgeordnetenwatch, monitoring, v2, Bug-Regressions
Neue Tests in dieser Migration:
- test_database.py (Merkliste-CRUD, Subscriptions, abgeordnetenwatch-Joins)
- test_clustering.py (82% Coverage)
- test_drucksache_typen.py (100%)
- test_mail.py (86%)
- test_monitoring.py (23 Tests)
- test_abgeordnetenwatch.py (23 Tests, inkl. Drucksache-Extraction)
- test_redline_parser.py (20 Tests fuer §INS§/§DEL§-Marker)
- test_bug_regressions.py (PRAGMA, JWT-azp, CDU-PDF, PFLICHT-FRAKTIONEN, NRW-Titel)
- test_embeddings_v3_v4.py (WRITE/READ-Pattern)
- test_wahlprogramm_check.py (#128)
- test_wahlprogramm_fetch.py (#138)
- test_antrag/bewertung/abonnement_repository.py + test_llm_bewerter.py (DDD)
- test_domain_behavior.py (5 Domain-Methoden boundary tests)
- tests/e2e/test_ui.py (Playwright)
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-25 20:55:57 +02:00
|
|
|
"""Tests für LlmBewerter-Port und QwenBewerter-Adapter (ADR 0008).
|
|
|
|
|
|
|
|
|
|
Der Adapter wird mit einem Fake-Client getestet — kein Netzwerk, kein
|
|
|
|
|
``openai``-Paket. Retry-Semantik (Temperatur steigt um 0.1 pro Versuch)
|
|
|
|
|
ist hier explizit getestet, damit die Migration die Semantik nicht
|
|
|
|
|
still verändert.
|
|
|
|
|
"""
|
|
|
|
|
from __future__ import annotations
|
|
|
|
|
|
|
|
|
|
import asyncio
|
|
|
|
|
import json
|
|
|
|
|
import types
|
|
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
|
|
|
|
|
|
from app.adapters.qwen_bewerter import QwenBewerter, _strip_markdown_fences
|
|
|
|
|
from app.ports.llm_bewerter import LlmBewerter, LlmRequest
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _run(coro):
|
|
|
|
|
return asyncio.get_event_loop().run_until_complete(coro)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _make_fake_client(responses: list[str]):
|
|
|
|
|
"""Produziert einen Fake-OpenAI-Client, der pro Call einen Response aus
|
|
|
|
|
der Liste liefert und Metadaten (Temperatur) aufzeichnet."""
|
|
|
|
|
calls: list[dict] = []
|
|
|
|
|
|
|
|
|
|
class FakeCompletions:
|
|
|
|
|
async def create(self, **kwargs):
|
|
|
|
|
calls.append(dict(kwargs))
|
|
|
|
|
idx = len(calls) - 1
|
|
|
|
|
content = responses[min(idx, len(responses) - 1)]
|
|
|
|
|
return types.SimpleNamespace(
|
|
|
|
|
choices=[types.SimpleNamespace(
|
|
|
|
|
message=types.SimpleNamespace(content=content)
|
|
|
|
|
)]
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
class FakeChat:
|
|
|
|
|
completions = FakeCompletions()
|
|
|
|
|
|
|
|
|
|
class FakeClient:
|
|
|
|
|
chat = FakeChat()
|
|
|
|
|
|
|
|
|
|
return FakeClient(), calls
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ─── Strip-Fences ──────────────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
class TestStripMarkdownFences:
    """_strip_markdown_fences removes code fences but leaves plain JSON alone."""

    def test_plain_json_unchanged(self):
        raw = '{"a": 1}'
        assert _strip_markdown_fences(raw) == raw

    def test_json_fence(self):
        fenced = '```json\n{"a": 1}\n```'
        assert _strip_markdown_fences(fenced) == '{"a": 1}'

    def test_plain_fence(self):
        fenced = '```\n{"a": 1}\n```'
        assert _strip_markdown_fences(fenced) == '{"a": 1}'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ─── Protocol-Konformität ──────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
class TestProtocol:
    """QwenBewerter satisfies the LlmBewerter structural Protocol."""

    def test_qwen_implements_llm_bewerter(self):
        # isinstance works because LlmBewerter is a runtime_checkable
        # Protocol — the check verifies that a ``bewerte`` method exists.
        adapter = QwenBewerter(api_key="x", base_url="y", client=object())
        assert isinstance(adapter, LlmBewerter)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ─── QwenBewerter mit FakeClient ───────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
class TestQwenBewerterHappyPath:
    """Successful calls: JSON parsing, fence stripping, model pass-through."""

    def test_single_successful_call(self):
        fake, calls = _make_fake_client(['{"gwoeScore": 7.0}'])
        adapter = QwenBewerter(api_key="x", base_url="y", client=fake)
        request = LlmRequest(system_prompt="sys", user_prompt="usr")
        assert _run(adapter.bewerte(request)) == {"gwoeScore": 7.0}
        # Exactly one upstream call, issued at the base temperature.
        assert len(calls) == 1
        assert calls[0]["temperature"] == pytest.approx(0.3)

    def test_markdown_fence_is_stripped(self):
        fake, _ = _make_fake_client(['```json\n{"gwoeScore": 8.0}\n```'])
        adapter = QwenBewerter(client=fake)
        outcome = _run(adapter.bewerte(LlmRequest("sys", "usr")))
        assert outcome == {"gwoeScore": 8.0}

    def test_passes_model_through(self):
        fake, calls = _make_fake_client(['{"a": 1}'])
        adapter = QwenBewerter(client=fake)
        _run(adapter.bewerte(LlmRequest("sys", "usr", model="qwen-turbo")))
        assert calls[0]["model"] == "qwen-turbo"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestQwenBewerterRetries:
    """Retry loop: temperature rises 0.1 per attempt; errors surface at the end."""

    def test_retry_raises_temperature(self):
        """On a JSON parse failure the temperature rises by 0.1 per attempt."""
        fake, calls = _make_fake_client([
            "nicht valides JSON",
            "immer noch kaputt",
            '{"gwoeScore": 6.0}',  # third attempt succeeds
        ])
        adapter = QwenBewerter(client=fake)
        outcome = _run(adapter.bewerte(LlmRequest("sys", "usr", max_retries=3)))
        assert outcome == {"gwoeScore": 6.0}
        assert len(calls) == 3
        for attempt, expected in enumerate((0.3, 0.4, 0.5)):
            assert calls[attempt]["temperature"] == pytest.approx(expected)

    def test_exhausted_retries_raise(self):
        fake, _ = _make_fake_client(["kaputt", "kaputt", "kaputt"])
        adapter = QwenBewerter(client=fake)
        with pytest.raises(json.JSONDecodeError):
            _run(adapter.bewerte(LlmRequest("sys", "usr", max_retries=3)))

    def test_single_retry_is_respected(self):
        """max_retries=1 means exactly one attempt, no retry."""
        fake, calls = _make_fake_client(["kaputt"])
        adapter = QwenBewerter(client=fake)
        with pytest.raises(json.JSONDecodeError):
            _run(adapter.bewerte(LlmRequest("sys", "usr", max_retries=1)))
        assert len(calls) == 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestLlmRequestDefaults:
    """LlmRequest defaults mirror the legacy analyzer's hard-coded values."""

    def test_defaults_match_legacy_analyzer(self):
        request = LlmRequest("s", "u")
        assert request.model == "qwen-plus"
        assert request.max_retries == 3
        assert request.max_tokens == 4000
        assert request.base_temperature == 0.3
|
2026-04-28 10:56:56 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# ─── Coverage-Backfill (#134) ────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestContentFingerprint:
    """_content_fingerprint renders a short 'len=… sha1=…' debug string."""

    def test_empty_string_returns_len_zero(self):
        from app.adapters.qwen_bewerter import _content_fingerprint

        assert _content_fingerprint("") == "len=0"

    def test_none_returns_len_zero(self):
        from app.adapters.qwen_bewerter import _content_fingerprint

        # Defensive: None is tolerated because the logging path is fed
        # ``choices[0].message.content``, which can itself be None.
        assert _content_fingerprint(None) == "len=0"

    def test_non_empty_includes_sha1_prefix(self):
        from app.adapters.qwen_bewerter import _content_fingerprint

        fingerprint = _content_fingerprint("hallo")
        prefix, marker, digest = fingerprint.partition("sha1=")
        assert marker == "sha1="
        assert prefix == "len=5 "
        assert len(digest) == 8
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestStripMarkdownJsonFences:
    """The ```json fence variant is handled in addition to the plain fence."""

    def test_json_fence_with_explicit_lang(self):
        from app.adapters.qwen_bewerter import _strip_markdown_fences

        fenced = "```json\n{\"a\": 1}\n```"
        assert _strip_markdown_fences(fenced) == '{"a": 1}'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestLazyClientInstantiation:
    """_get_client only imports openai on the first call, and only if needed."""

    def test_no_client_triggers_openai_import(self, monkeypatch):
        """Without an injected client, _get_client lazily imports
        openai.AsyncOpenAI. We plant a fake ``openai`` module in
        ``sys.modules`` to prove the adapter really instantiates the
        class (branch coverage for the lazy-import path)."""
        import sys
        from unittest.mock import MagicMock

        from app.adapters.qwen_bewerter import QwenBewerter

        instance = MagicMock(name="AsyncOpenAI-Instance")
        ctor = MagicMock(return_value=instance)
        fake_openai = type(sys)("openai")
        fake_openai.AsyncOpenAI = ctor
        monkeypatch.setitem(sys.modules, "openai", fake_openai)

        adapter = QwenBewerter(api_key="test", base_url="http://test")
        assert adapter._get_client() is instance
        ctor.assert_called_once_with(api_key="test",
                                     base_url="http://test")

    def test_injected_client_skips_lazy_import(self):
        """A client passed to the constructor is returned as-is —
        no openai import happens."""
        from app.adapters.qwen_bewerter import QwenBewerter

        sentinel = object()
        adapter = QwenBewerter(client=sentinel)
        assert adapter._get_client() is sentinel
|