- Add submit_finetune and get_finetune_status abstract methods to LLMClient base - Implement both methods in ZhipuAIClient using asyncio.get_running_loop() - Rewrite finetune_service to call llm.submit_finetune / llm.get_finetune_status instead of accessing llm._client directly, restoring interface encapsulation - Replace asyncio.get_event_loop() with get_running_loop() in ZhipuAIClient._call and all four methods in RustFSClient (deprecated in Python 3.10+) - Update test_finetune_service to mock the LLMClient interface methods as AsyncMocks - Add two new tests in test_llm_client for submit_finetune and get_finetune_status
152 lines
4.7 KiB
Python
"""Tests for finetune_service — uses LLMClient interface (no internal SDK access)."""
|
|
import pytest
|
|
from unittest.mock import MagicMock, AsyncMock
|
|
|
|
from app.clients.llm.base import LLMClient
|
|
from app.core.exceptions import LLMCallError
|
|
from app.models.finetune_models import (
|
|
FinetuneStartRequest,
|
|
FinetuneStartResponse,
|
|
FinetuneStatusResponse,
|
|
)
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Helpers
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _make_llm(job_id: str = "glm-ft-test", status: str = "running", progress: int | None = None):
    """Build a MagicMock(spec=LLMClient) whose finetune methods are AsyncMocks.

    submit_finetune resolves to *job_id*; get_finetune_status resolves to a
    status payload assembled from the given arguments (error_message is None).
    """
    status_payload = {
        "job_id": job_id,
        "status": status,
        "progress": progress,
        "error_message": None,
    }
    mock_client = MagicMock(spec=LLMClient)
    mock_client.submit_finetune = AsyncMock(return_value=job_id)
    mock_client.get_finetune_status = AsyncMock(return_value=status_payload)
    return mock_client
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# submit_finetune
|
|
# ---------------------------------------------------------------------------
|
|
|
|
@pytest.mark.asyncio
async def test_submit_finetune_returns_job_id():
    """A successful submission yields a FinetuneStartResponse carrying the SDK job id."""
    from app.services.finetune_service import submit_finetune

    mock_llm = _make_llm(job_id="glm-ft-abc123")
    request = FinetuneStartRequest(
        jsonl_url="s3://bucket/train.jsonl",
        base_model="glm-4",
        hyperparams={"n_epochs": 3},
    )

    response = await submit_finetune(request, mock_llm)

    assert isinstance(response, FinetuneStartResponse)
    assert response.job_id == "glm-ft-abc123"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_submit_finetune_calls_interface_with_correct_params():
    """The service forwards url, base model and hyperparams positionally to the interface."""
    from app.services.finetune_service import submit_finetune

    mock_llm = _make_llm(job_id="glm-ft-xyz")
    request = FinetuneStartRequest(
        jsonl_url="s3://bucket/train.jsonl",
        base_model="glm-4",
        hyperparams={"n_epochs": 5},
    )

    await submit_finetune(request, mock_llm)

    expected_args = ("s3://bucket/train.jsonl", "glm-4", {"n_epochs": 5})
    mock_llm.submit_finetune.assert_awaited_once_with(*expected_args)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_submit_finetune_none_hyperparams_passes_empty_dict():
    """hyperparams=None should be passed as {} to the interface."""
    from app.services.finetune_service import submit_finetune

    mock_llm = _make_llm(job_id="glm-ft-nohp")
    # hyperparams deliberately omitted → model default is None
    request = FinetuneStartRequest(
        jsonl_url="s3://bucket/train.jsonl",
        base_model="glm-4",
    )

    await submit_finetune(request, mock_llm)

    expected_args = ("s3://bucket/train.jsonl", "glm-4", {})
    mock_llm.submit_finetune.assert_awaited_once_with(*expected_args)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_submit_finetune_raises_llm_call_error_on_failure():
    """An LLMCallError raised by the interface propagates out of the service."""
    from app.services.finetune_service import submit_finetune

    failing_llm = MagicMock(spec=LLMClient)
    failing_llm.submit_finetune = AsyncMock(side_effect=LLMCallError("微调任务提交失败: SDK exploded"))

    request = FinetuneStartRequest(
        jsonl_url="s3://bucket/train.jsonl",
        base_model="glm-4",
    )

    with pytest.raises(LLMCallError):
        await submit_finetune(request, failing_llm)
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# get_finetune_status — status mapping
|
|
# ---------------------------------------------------------------------------
|
|
|
|
@pytest.mark.asyncio
@pytest.mark.parametrize("sdk_status,expected", [
    ("running", "RUNNING"),
    ("succeeded", "SUCCESS"),
    ("failed", "FAILED"),
    ("pending", "RUNNING"),    # unknown → conservative RUNNING
    ("queued", "RUNNING"),     # unknown → conservative RUNNING
    ("cancelled", "RUNNING"),  # unknown → conservative RUNNING
])
async def test_get_finetune_status_maps_status(sdk_status, expected):
    """SDK status strings map to the service's status enum; unknowns stay RUNNING."""
    from app.services.finetune_service import get_finetune_status

    mock_llm = _make_llm(status=sdk_status)
    response = await get_finetune_status("glm-ft-test", mock_llm)

    assert isinstance(response, FinetuneStatusResponse)
    assert response.status == expected
    assert response.job_id == "glm-ft-test"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_get_finetune_status_includes_progress():
    """A numeric progress reported by the interface is surfaced on the response."""
    from app.services.finetune_service import get_finetune_status

    mock_llm = _make_llm(status="running", progress=42)

    response = await get_finetune_status("glm-ft-test", mock_llm)

    assert response.progress == 42
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_get_finetune_status_raises_llm_call_error_on_failure():
    """An LLMCallError raised during status lookup propagates out of the service."""
    from app.services.finetune_service import get_finetune_status

    failing_llm = MagicMock(spec=LLMClient)
    failing_llm.get_finetune_status = AsyncMock(side_effect=LLMCallError("查询微调任务失败: SDK exploded"))

    with pytest.raises(LLMCallError):
        await get_finetune_status("glm-ft-bad", failing_llm)
|