refactor: finetune through LLMClient interface + get_running_loop
- Add `submit_finetune` and `get_finetune_status` abstract methods to the `LLMClient` base class.
- Implement both methods in `ZhipuAIClient` using `asyncio.get_running_loop()`.
- Rewrite `finetune_service` to call `llm.submit_finetune` / `llm.get_finetune_status` instead of accessing `llm._client` directly, restoring interface encapsulation.
- Replace `asyncio.get_event_loop()` with `asyncio.get_running_loop()` in `ZhipuAIClient._call` and all four methods of `RustFSClient` (`get_event_loop` is deprecated since Python 3.10).
- Update `test_finetune_service` to mock the `LLMClient` interface methods as `AsyncMock`s.
- Add two new tests in `test_llm_client` covering `submit_finetune` and `get_finetune_status`.
This commit is contained in:
@@ -1,6 +1,4 @@
|
||||
import asyncio
|
||||
|
||||
from app.core.exceptions import LLMCallError
|
||||
from app.clients.llm.base import LLMClient
|
||||
from app.core.logging import get_logger
|
||||
from app.models.finetune_models import (
|
||||
FinetuneStartRequest,
|
||||
@@ -17,45 +15,21 @@ _STATUS_MAP = {
|
||||
}
|
||||
|
||||
|
||||
async def submit_finetune(req: FinetuneStartRequest, llm) -> FinetuneStartResponse:
    """Submit a fine-tune job to ZhipuAI and return the job ID.

    Args:
        req: Fine-tune request carrying the training-file URL (``jsonl_url``),
            the base model name, and optional hyperparameters.
        llm: LLM client wrapper whose underlying SDK client performs the
            blocking job-creation call.

    Returns:
        FinetuneStartResponse containing the provider-assigned job ID.

    Raises:
        LLMCallError: If the provider rejects or fails the submission; the
            original exception is chained as the cause.
    """
    # get_running_loop() replaces the deprecated get_event_loop(); inside a
    # coroutine a running loop is guaranteed to exist (Python 3.10+ idiom).
    loop = asyncio.get_running_loop()
    try:
        # The SDK call is blocking, so push it onto the default executor.
        # NOTE(review): reaching into llm._client breaks encapsulation —
        # prefer exposing this through the LLMClient interface.
        response = await loop.run_in_executor(
            None,
            lambda: llm._client.fine_tuning.jobs.create(
                training_file=req.jsonl_url,
                model=req.base_model,
                hyperparameters=req.hyperparams or {},
            ),
        )
        job_id = response.id
        logger.info("finetune_submit", extra={"job_id": job_id, "model": req.base_model})
        return FinetuneStartResponse(job_id=job_id)
    except Exception as exc:
        logger.error("finetune_submit_error", extra={"error": str(exc)})
        raise LLMCallError(f"微调任务提交失败: {exc}") from exc
|
||||
async def submit_finetune(req: FinetuneStartRequest, llm: LLMClient) -> FinetuneStartResponse:
    """Submit a fine-tune job via the LLMClient interface and return the job ID.

    Delegates job creation to ``llm.submit_finetune`` so this service stays
    provider-agnostic, logs the submission, and wraps the resulting ID.
    """
    hyperparams = req.hyperparams or {}
    new_job_id = await llm.submit_finetune(req.jsonl_url, req.base_model, hyperparams)
    logger.info(
        "finetune_submit",
        extra={"job_id": new_job_id, "model": req.base_model},
    )
    return FinetuneStartResponse(job_id=new_job_id)
|
||||
|
||||
|
||||
async def get_finetune_status(job_id: str, llm) -> FinetuneStatusResponse:
    """Retrieve fine-tune job status from ZhipuAI.

    Args:
        job_id: Provider-assigned identifier returned by job submission.
        llm: LLM client wrapper whose underlying SDK client performs the
            blocking retrieval call.

    Returns:
        FinetuneStatusResponse with the normalized status plus optional
        progress and error message (both may be absent on the SDK object).

    Raises:
        LLMCallError: If the status lookup fails; the original exception is
            chained as the cause.
    """
    # get_running_loop() replaces the deprecated get_event_loop(); inside a
    # coroutine a running loop is guaranteed to exist (Python 3.10+ idiom).
    loop = asyncio.get_running_loop()
    try:
        # The SDK call is blocking, so push it onto the default executor.
        # NOTE(review): reaching into llm._client breaks encapsulation —
        # prefer exposing this through the LLMClient interface.
        response = await loop.run_in_executor(
            None,
            lambda: llm._client.fine_tuning.jobs.retrieve(job_id),
        )
        status_raw = response.status
        # Unknown provider statuses fall back to RUNNING (conservative: never
        # report a terminal state we cannot confirm).
        status = _STATUS_MAP.get(status_raw, "RUNNING")
        # progress / error_message are optional on the SDK response object.
        progress = getattr(response, "progress", None)
        error_message = getattr(response, "error_message", None)
        logger.info("finetune_status", extra={"job_id": job_id, "status": status})
        return FinetuneStatusResponse(
            job_id=job_id,
            status=status,
            progress=progress,
            error_message=error_message,
        )
    except Exception as exc:
        logger.error("finetune_status_error", extra={"job_id": job_id, "error": str(exc)})
        raise LLMCallError(f"微调状态查询失败: {exc}") from exc
|
||||
async def get_finetune_status(job_id: str, llm: LLMClient) -> FinetuneStatusResponse:
    """Retrieve fine-tune job status via the LLMClient interface.

    Fetches the raw status payload from ``llm.get_finetune_status``,
    normalizes the provider status through ``_STATUS_MAP``, logs it, and
    repackages the fields into a ``FinetuneStatusResponse``.
    """
    raw = await llm.get_finetune_status(job_id)
    # Fall back to RUNNING for provider statuses we do not recognise.
    mapped = _STATUS_MAP.get(raw["status"], "RUNNING")
    logger.info("finetune_status", extra={"job_id": job_id, "status": mapped})
    return FinetuneStatusResponse(
        job_id=raw["job_id"],
        status=mapped,
        progress=raw["progress"],
        error_message=raw["error_message"],
    )
|
||||
|
||||
Reference in New Issue
Block a user