# label_ai_service/tests/test_llm_client.py — unit tests for ZhipuAIClient
import pytest
from unittest.mock import MagicMock, patch
from app.clients.llm.zhipuai_client import ZhipuAIClient
from app.core.exceptions import LLMCallError
@pytest.fixture
def mock_sdk_response():
    """Stand-in for a ZhipuAI SDK chat-completion response.

    Only the attribute path the client reads (``choices[0].message.content``)
    is populated with a JSON string; every other attribute is auto-mocked.
    """
    response = MagicMock()
    response.choices[0].message.content = '{"result": "ok"}'
    return response
@pytest.fixture
def client():
    """ZhipuAIClient instance built with the real SDK class patched out.

    The patch is active only for the duration of construction, so the
    client ends up holding a mock SDK object instead of a live one.
    """
    sdk_patch = patch("app.clients.llm.zhipuai_client.ZhipuAI")
    sdk_patch.start()
    try:
        return ZhipuAIClient(api_key="test-key")
    finally:
        # Stop the patch once the instance exists — mirrors exiting the
        # context manager right after construction.
        sdk_patch.stop()
@pytest.mark.asyncio
async def test_chat_returns_content(client, mock_sdk_response):
    """chat() must surface the first choice's message content verbatim."""
    create_mock = client._client.chat.completions.create
    create_mock.return_value = mock_sdk_response
    content = await client.chat("glm-4-flash", [{"role": "user", "content": "hello"}])
    assert content == '{"result": "ok"}'
@pytest.mark.asyncio
async def test_chat_vision_returns_content(client, mock_sdk_response):
    """chat_vision() must surface the first choice's message content verbatim."""
    create_mock = client._client.chat.completions.create
    create_mock.return_value = mock_sdk_response
    content = await client.chat_vision("glm-4v-flash", [{"role": "user", "content": []}])
    assert content == '{"result": "ok"}'
@pytest.mark.asyncio
async def test_llm_call_error_on_sdk_exception(client):
    """Any exception raised inside the SDK call must be wrapped in LLMCallError."""
    create_mock = client._client.chat.completions.create
    create_mock.side_effect = RuntimeError("quota exceeded")
    with pytest.raises(LLMCallError, match="大模型调用失败"):
        await client.chat("glm-4-flash", [{"role": "user", "content": "hi"}])