feat: remove the FastAPI service

XIE7654
2025-11-07 23:43:34 +08:00
parent 43440a4aab
commit 9b2e770d1d
8 changed files with 172 additions and 51 deletions

View File

@@ -41,8 +41,8 @@ package-lock.json
 **/backend-mock/data

 # local env files
-.env.local
-.env.*.local
+#.env.local
+#.env.*.local

 .eslintcache
 logs

View File

@@ -1,6 +1,15 @@
-from rest_framework import serializers
+import asyncio
+from django.http import StreamingHttpResponse
+from rest_framework import serializers, status
+from rest_framework.decorators import action
+from rest_framework.response import Response
+from ai.llm.enums import LLMProvider
+from ai.llm.factory import get_adapter
 from ai.models import ChatMessage
+from backend import settings
+from models.ai import MessageType
 from utils.serializers import CustomModelSerializer
 from utils.custom_model_viewSet import CustomModelViewSet
 from django_filters import rest_framework as filters
@@ -36,3 +45,119 @@ class ChatMessageViewSet(CustomModelViewSet):
     ordering_fields = ['create_time', 'id']
     ordering = ['-create_time']
+
+    @action(detail=False, methods=['post'], url_path='stream')
+    def stream(self, request):
+        """
+        Streaming chat endpoint.
+        """
+        content = request.data.get('content')
+        conversation_id = request.data.get('conversation_id')
+        platform = request.data.get('platform', 'deepseek')
+
+        # Resolve the platform configuration
+        if platform == 'tongyi':
+            model = 'qwen-plus'
+            api_key = settings.DASHSCOPE_API_KEY
+            provider = LLMProvider.TONGYI
+        else:
+            # Default to DeepSeek
+            model = 'deepseek-chat'
+            api_key = settings.DEEPSEEK_API_KEY
+            provider = LLMProvider.DEEPSEEK
+
+        # Current user
+        user_id = request.user.id
+
+        try:
+            # Look up the conversation's existing messages
+            conversation = ChatMessage.objects.filter(conversation_id=conversation_id).order_by('id')
+        except ValueError as e:
+            return Response({"error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
+
+        # Persist the user message
+        ChatMessage.objects.create(
+            conversation_id=conversation_id,
+            user_id=user_id,
+            role_id=None,
+            model=model,
+            model_id=None,
+            type=MessageType.USER,
+            reply_id=None,
+            content=content,
+            use_context=True,
+            segment_ids=None,
+        )
+
+        # Build the chat context from the stored history
+        context = [("system", "You are a helpful assistant")]
+        history = ChatMessage.objects.filter(conversation_id=conversation_id).order_by('id')
+        for msg in history:
+            context.append((msg.type, msg.content))
+
+        # Get the LLM adapter
+        llm = get_adapter(provider, api_key=api_key, model=model)
+
+        # Wrap the async LLM stream in a synchronous generator (the key fix)
+        def generate():
+            ai_reply = ""
+            loop = None
+            try:
+                # Create a fresh event loop (avoid reusing the main thread's loop)
+                loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(loop)
+
+                # Async generator wrapper; llm.stream_chat is assumed to be an async generator
+                async def async_stream():
+                    async for chunk in llm.stream_chat(context):
+                        yield chunk
+
+                # Drive the async generator from synchronous code
+                async_gen = async_stream()
+                while True:
+                    try:
+                        # Pull the next chunk
+                        chunk = loop.run_until_complete(async_gen.__anext__())
+                    except StopAsyncIteration:
+                        break  # stream finished
+                    except Exception as e:
+                        # Surface LLM stream errors to the client
+                        yield f"data: error: {str(e)}\n\n"
+                        break
+
+                    # Extract the chunk content (handles different LLM return shapes)
+                    if hasattr(chunk, 'content'):
+                        chunk_content = chunk.content.strip()
+                    elif isinstance(chunk, dict) and 'content' in chunk:
+                        chunk_content = chunk['content'].strip()
+                    else:
+                        chunk_content = str(chunk).strip()
+
+                    # Only emit non-empty content
+                    if chunk_content:
+                        ai_reply += chunk_content
+                        # SSE framing: "data: <content>", terminated by a blank line (\n\n)
+                        yield f"data: {chunk_content}\n\n"
+            finally:
+                # Close the event loop (avoid leaking resources)
+                if loop:
+                    loop.close()
+
+            # Persist the AI reply
+            if ai_reply.strip():
+                ChatMessage.objects.create(
+                    conversation_id=conversation_id,
+                    user_id=user_id,
+                    role_id=None,
+                    model=model,
+                    model_id=None,
+                    type=MessageType.ASSISTANT,
+                    reply_id=None,
+                    content=ai_reply,
+                    use_context=True,
+                    segment_ids=None,
+                )
+
+        return StreamingHttpResponse(generate(), content_type='text/event-stream')
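
The heart of this change is the sync-over-async bridge in generate(): StreamingHttpResponse consumes a plain synchronous iterator, while the LLM adapter yields chunks from an async generator, so the view pumps a private event loop by hand. Below is a minimal, self-contained sketch of the same pattern; stream_chat here is a stand-in async generator, not the project's real adapter API.

    import asyncio
    from typing import AsyncIterator, Iterator, TypeVar

    T = TypeVar("T")

    def iter_over_async(agen: AsyncIterator[T]) -> Iterator[T]:
        # Drive an async generator from synchronous code on a private event loop.
        loop = asyncio.new_event_loop()
        try:
            while True:
                try:
                    yield loop.run_until_complete(agen.__anext__())
                except StopAsyncIteration:
                    break
        finally:
            loop.close()

    async def stream_chat(prompt: str) -> AsyncIterator[str]:
        # Stand-in for the adapter's async streaming API (an assumption for illustration).
        for word in prompt.split():
            await asyncio.sleep(0)  # simulate waiting on the network
            yield word

    for chunk in iter_over_async(stream_chat("hello streaming world")):
        print(chunk)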

View File

@@ -103,7 +103,7 @@ DATABASES = {
         'ENGINE': 'django.db.backends.mysql',
         'NAME': 'django_vue',
         'USER': os.getenv('DB_USER', 'chenze'),
-        'PASSWORD': os.getenv('DB_PASSWORD', 'my-secret-pw'),
+        'PASSWORD': os.getenv('DB_PASSWORD', '123456'),
         'HOST': os.getenv('DB_HOST', 'localhost'),
     }
 }

View File

@@ -112,25 +112,25 @@ services:
     env_file:
       - ./docker/.env.dev
       - ./docker/.env.local

-  ai_service:
-    build:
-      context: ./ai_service
-      dockerfile: Dockerfile
-      target: dev
-    volumes:
-      - ./ai_service:/app
-    ports:
-      - "48010:8010"
-    depends_on:
-      - db
-      - redis
-    networks:
-      - dj_admin_network
-    env_file:
-      - ./docker/.env.dev
-      - ./docker/.env.local
-    command: uvicorn main:app --host 0.0.0.0 --port 8010 --reload
+#
+#  ai_service:
+#    build:
+#      context: ./ai_service
+#      dockerfile: Dockerfile
+#      target: dev
+#    volumes:
+#      - ./ai_service:/app
+#    ports:
+#      - "48010:8010"
+#    depends_on:
+#      - db
+#      - redis
+#    networks:
+#      - dj_admin_network
+#    env_file:
+#      - ./docker/.env.dev
+#      - ./docker/.env.local
+#    command: uvicorn main:app --host 0.0.0.0 --port 8010 --reload

 networks:
   dj_admin_network:

View File

@@ -95,24 +95,24 @@ services:
     networks:
       - app_net

-  ai_service:
-    restart: always
-    build:
-      context: ./ai_service
-      dockerfile: Dockerfile  # reuse the backend Dockerfile
-      target: prod
-    volumes:
-      - ./ai_service:/app
-    ports:
-      - "38010:8010"
-    depends_on:
-      - db
-      - redis
-    networks:
-      - app_net
-    env_file:
-      - ./docker/.env.prod
-      - ./docker/.env.local
+#  ai_service:
+#    restart: always
+#    build:
+#      context: ./ai_service
+#      dockerfile: Dockerfile  # reuse the backend Dockerfile
+#      target: prod
+#    volumes:
+#      - ./ai_service:/app
+#    ports:
+#      - "38010:8010"
+#    depends_on:
+#      - db
+#      - redis
+#    networks:
+#      - app_net
+#    env_file:
+#      - ./docker/.env.prod
+#      - ./docker/.env.local

   frontend:
     restart: always

View File

@@ -35,7 +35,7 @@ export async function fetchAIStream({
   platform,
   conversation_id,
 }: FetchAIStreamParams) {
-  const res = await fetchWithAuth('chat/stream', {
+  const res = await fetchWithAuth('ai/chat_message/stream/', {
     method: 'POST',
     body: JSON.stringify({ content, platform, conversation_id }),
   });

View File

@@ -2,7 +2,7 @@ import { useAccessStore } from '@vben/stores';
 import { formatToken } from '#/utils/auth';

-export const API_BASE = '/api/ai/v1/';
+export const API_BASE = '/api/admin/';

 export function fetchWithAuth(input: RequestInfo, init: RequestInit = {}) {
   const accessStore = useAccessStore();
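
With API_BASE now '/api/admin/' and the route 'ai/chat_message/stream/', the full endpoint resolves to /api/admin/ai/chat_message/stream/. A quick way to sanity-check the SSE stream from Python, assuming a local dev server and a placeholder bearer token (host, port, and token are all hypothetical):

    import requests

    resp = requests.post(
        'http://localhost:8000/api/admin/ai/chat_message/stream/',
        headers={'Authorization': 'Bearer <token>'},
        json={'content': 'hi', 'platform': 'deepseek', 'conversation_id': 1},
        stream=True,
    )
    # Each SSE event arrives as "data: <content>" followed by a blank line.
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith('data: '):
            print(line[len('data: '):])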

View File

@@ -13,14 +13,10 @@ import {
   Row,
   Select,
 } from 'ant-design-vue';

-import {
-  createConversation,
-  fetchAIStream,
-  getConversations,
-  getMessages,
-} from '#/api/ai/chat';
-import {AiChatConversationModel} from "#/models/ai/chat_conversation";
-import {AiChatMessageModel} from "#/models/ai/chat_message";
+import { AiChatConversationModel } from '#/models/ai/chat_conversation';
+import { AiChatMessageModel } from '#/models/ai/chat_message';
+import { fetchAIStream } from '#/api/ai/chat';

 const aiChatConversation = new AiChatConversationModel();
 const aiChatMessageModel = new AiChatMessageModel();