38 lines
1.3 KiB
Python
38 lines
1.3 KiB
Python
|
|
import asyncio
|
||
|
|
|
||
|
|
from zhipuai import ZhipuAI
|
||
|
|
|
||
|
|
from app.clients.llm.base import LLMClient
|
||
|
|
from app.core.exceptions import LLMCallError
|
||
|
|
from app.core.logging import get_logger
|
||
|
|
|
||
|
|
logger = get_logger(__name__)
|
||
|
|
|
||
|
|
|
||
|
|
class ZhipuAIClient(LLMClient):
    """LLMClient implementation backed by the ZhipuAI SDK.

    The ZhipuAI SDK is synchronous, so every call is offloaded to the
    default thread-pool executor to avoid blocking the event loop.
    """

    def __init__(self, api_key: str) -> None:
        """Create a client authenticated with *api_key*."""
        self._client = ZhipuAI(api_key=api_key)

    async def chat(self, model: str, messages: list[dict]) -> str:
        """Run a text chat completion and return the first choice's content."""
        return await self._call(model, messages)

    async def chat_vision(self, model: str, messages: list[dict]) -> str:
        """Run a vision chat completion (same transport path as ``chat``)."""
        return await self._call(model, messages)

    async def _call(self, model: str, messages: list[dict]) -> str:
        """Invoke the blocking SDK call in a worker thread and return the reply text.

        Args:
            model: Model identifier passed straight through to the SDK.
            messages: Chat messages in the SDK's expected dict format.

        Returns:
            The ``content`` of the first response choice.

        Raises:
            LLMCallError: wraps any exception raised by the SDK call.
        """
        # get_running_loop() is the correct API inside a coroutine;
        # get_event_loop() is deprecated here since Python 3.10 and can
        # silently create a new loop outside async context.
        loop = asyncio.get_running_loop()
        try:
            response = await loop.run_in_executor(
                None,
                lambda: self._client.chat.completions.create(
                    model=model,
                    messages=messages,
                ),
            )
            content = response.choices[0].message.content
            # NOTE(review): message.content may be None for tool-call style
            # replies — len(None) would raise TypeError and be wrapped as
            # LLMCallError below. Confirm whether that is the intended contract.
            logger.info("llm_call", extra={"model": model, "response_len": len(content)})
            return content
        except Exception as exc:
            logger.error("llm_call_error", extra={"model": model, "error": str(exc)})
            raise LLMCallError(f"大模型调用失败: {exc}") from exc
|