diff --git a/.dockerignore b/.dockerignore
index 0507dad..b08ff43 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -41,8 +41,8 @@ package-lock.json
 **/backend-mock/data
 
 # local env files
-.env.local
-.env.*.local
+#.env.local
+#.env.*.local
 
 .eslintcache
 logs
diff --git a/backend/ai/views/chat_message.py b/backend/ai/views/chat_message.py
index 4c4611e..3c47db7 100644
--- a/backend/ai/views/chat_message.py
+++ b/backend/ai/views/chat_message.py
@@ -1,6 +1,15 @@
-from rest_framework import serializers
+import asyncio
+from django.http import StreamingHttpResponse
+from rest_framework import serializers, status
+from rest_framework.decorators import action
+from rest_framework.response import Response
+
+from ai.llm.enums import LLMProvider
+from ai.llm.factory import get_adapter
 from ai.models import ChatMessage
+from backend import settings
+from models.ai import MessageType
 from utils.serializers import CustomModelSerializer
 from utils.custom_model_viewSet import CustomModelViewSet
 from django_filters import rest_framework as filters
@@ -36,3 +45,119 @@ class ChatMessageViewSet(CustomModelViewSet):
     ordering_fields = ['create_time', 'id']
     ordering = ['-create_time']
 
+    @action(detail=False, methods=['post'], url_path='stream')
+    def stream(self, request):
+        """
+        Streaming chat endpoint.
+        """
+        content = request.data.get('content')
+        conversation_id = request.data.get('conversation_id')
+        platform = request.data.get('platform', 'deepseek')
+
+        # Resolve the platform configuration
+        if platform == 'tongyi':
+            model = 'qwen-plus'
+            api_key = settings.DASHSCOPE_API_KEY
+            provider = LLMProvider.TONGYI
+        else:
+            # Default to DeepSeek
+            model = 'deepseek-chat'
+            api_key = settings.DEEPSEEK_API_KEY
+            provider = LLMProvider.DEEPSEEK
+
+        # Current user
+        user_id = request.user.id
+
+        try:
+            # Look up the conversation's messages (the queryset is lazy)
+            conversation = ChatMessage.objects.filter(conversation_id=conversation_id).order_by('id')
+        except ValueError as e:
+            return Response({"error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
+
+        # Persist the user message
+        ChatMessage.objects.create(
+            conversation_id=conversation_id,
+            user_id=user_id,
+            role_id=None,
+            model=model,
+            model_id=None,
+            type=MessageType.USER,
+            reply_id=None,
+            content=content,
+            use_context=True,
+            segment_ids=None,
+        )
+        # Build the prompt context from the conversation history
+        context = [("system", "You are a helpful assistant")]
+        history = ChatMessage.objects.filter(conversation_id=conversation_id).order_by('id')
+
+        for msg in history:
+            context.append((msg.type, msg.content))
+
+        # Get the LLM adapter
+        llm = get_adapter(provider, api_key=api_key, model=model)
+
+        # Create the streaming response
+
+        # Synchronous generator wrapping the async LLM stream (the core fix)
+        def generate():
+            ai_reply = ""
+            loop = None
+            try:
+                # Create a fresh event loop (avoid reusing the main thread's loop)
+                loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(loop)
+
+                # Async generator wrapper
+                async def async_stream():
+                    # Call the LLM's async streaming API (assumes llm.stream_chat is an async generator)
+                    async for chunk in llm.stream_chat(context):
+                        yield chunk
+
+                # Convert the async generator into synchronous iteration
+                async_gen = async_stream()
+                while True:
+                    try:
+                        # Pull async chunks one at a time
+                        chunk = loop.run_until_complete(async_gen.__anext__())
+                    except StopAsyncIteration:
+                        break  # Stream finished; exit the loop
+                    except Exception as e:
+                        # Catch LLM stream errors and surface them to the client
+                        yield f"data: Error: {str(e)}\n\n"
+                        break
+
+                    # Extract the chunk content (handle different LLM return formats)
+                    if hasattr(chunk, 'content'):
+                        chunk_content = chunk.content.strip()
+                    elif isinstance(chunk, dict) and 'content' in chunk:
+                        chunk_content = chunk['content'].strip()
+                    else:
+                        chunk_content = str(chunk).strip()
+
+                    # Only emit non-empty content
+                    if chunk_content:
+                        ai_reply += chunk_content
+                        # Follow the SSE framing: "data: <content>\n\n" (must end with \n\n)
+                        yield f"data: {chunk_content}\n\n"
+
+            finally:
+                # Close the event loop (avoid leaking resources)
+                if loop:
+                    loop.close()
+                # Persist the AI reply
+                if ai_reply.strip():
+                    ChatMessage.objects.create(
+                        conversation_id=conversation_id,
+                        user_id=user_id,
+                        role_id=None,
+                        model=model,
+                        model_id=None,
+                        type=MessageType.ASSISTANT,
+                        reply_id=None,
+                        content=ai_reply,
+                        use_context=True,
+                        segment_ids=None,
+                    )
+
+        return StreamingHttpResponse(generate(), content_type='text/event-stream')
diff --git a/backend/backend/settings.py b/backend/backend/settings.py
index a5ca487..043c12f 100644
--- a/backend/backend/settings.py
+++ b/backend/backend/settings.py
@@ -103,7 +103,7 @@ DATABASES = {
         'ENGINE': 'django.db.backends.mysql',
         'NAME': 'django_vue',
         'USER': os.getenv('DB_USER', 'chenze'),
-        'PASSWORD': os.getenv('DB_PASSWORD', 'my-secret-pw'),
+        'PASSWORD': os.getenv('DB_PASSWORD', '123456'),
         'HOST': os.getenv('DB_HOST', 'localhost'),
     }
 }
diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml
index 4e32c10..f54946d 100644
--- a/docker-compose.dev.yml
+++ b/docker-compose.dev.yml
@@ -112,25 +112,25 @@ services:
     env_file:
       - ./docker/.env.dev
       - ./docker/.env.local
-
-  ai_service:
-    build:
-      context: ./ai_service
-      dockerfile: Dockerfile
-      target: dev
-    volumes:
-      - ./ai_service:/app
-    ports:
-      - "48010:8010"
-    depends_on:
-      - db
-      - redis
-    networks:
-      - dj_admin_network
-    env_file:
-      - ./docker/.env.dev
-      - ./docker/.env.local
-    command: uvicorn main:app --host 0.0.0.0 --port 8010 --reload
+#
+#  ai_service:
+#    build:
+#      context: ./ai_service
+#      dockerfile: Dockerfile
+#      target: dev
+#    volumes:
+#      - ./ai_service:/app
+#    ports:
+#      - "48010:8010"
+#    depends_on:
+#      - db
+#      - redis
+#    networks:
+#      - dj_admin_network
+#    env_file:
+#      - ./docker/.env.dev
+#      - ./docker/.env.local
+#    command: uvicorn main:app --host 0.0.0.0 --port 8010 --reload
 
 networks:
   dj_admin_network:
diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml
index eb2418b..457784b 100644
--- a/docker-compose.prod.yml
+++ b/docker-compose.prod.yml
@@ -95,24 +95,24 @@
     networks:
       - app_net
 
-  ai_service:
-    restart: always
-    build:
-      context: ./ai_service
-      dockerfile: Dockerfile  # reuses the backend Dockerfile
-      target: prod
-    volumes:
-      - ./ai_service:/app
-    ports:
-      - "38010:8010"
-    depends_on:
-      - db
-      - redis
-    networks:
-      - app_net
-    env_file:
-      - ./docker/.env.prod
-      - ./docker/.env.local
+#  ai_service:
+#    restart: always
+#    build:
+#      context: ./ai_service
+#      dockerfile: Dockerfile  # reuses the backend Dockerfile
+#      target: prod
+#    volumes:
+#      - ./ai_service:/app
+#    ports:
+#      - "38010:8010"
+#    depends_on:
+#      - db
+#      - redis
+#    networks:
+#      - app_net
+#    env_file:
+#      - ./docker/.env.prod
+#      - ./docker/.env.local
 
   frontend:
     restart: always
diff --git a/web/apps/web-antd/src/api/ai/chat.ts b/web/apps/web-antd/src/api/ai/chat.ts
index d9e910d..27aab5c 100644
--- a/web/apps/web-antd/src/api/ai/chat.ts
+++ b/web/apps/web-antd/src/api/ai/chat.ts
@@ -35,7 +35,7 @@ export async function fetchAIStream({
   platform,
   conversation_id,
 }: FetchAIStreamParams) {
-  const res = await fetchWithAuth('chat/stream', {
+  const res = await fetchWithAuth('ai/chat_message/stream/', {
     method: 'POST',
     body: JSON.stringify({ content, platform, conversation_id }),
   });
diff --git a/web/apps/web-antd/src/utils/fetch-with-auth.ts b/web/apps/web-antd/src/utils/fetch-with-auth.ts
index c53f527..d370aa0 100644
--- a/web/apps/web-antd/src/utils/fetch-with-auth.ts
+++ b/web/apps/web-antd/src/utils/fetch-with-auth.ts
@@ -2,7 +2,7 @@ import { useAccessStore } from '@vben/stores';
 
 import { formatToken } from '#/utils/auth';
 
-export const API_BASE = '/api/ai/v1/';
+export const API_BASE = '/api/admin/';
 
 export function fetchWithAuth(input: RequestInfo, init: RequestInit = {}) {
   const accessStore = useAccessStore();
diff --git a/web/apps/web-antd/src/views/ai/chat/index.vue b/web/apps/web-antd/src/views/ai/chat/index.vue
index c2fd688..793c1fb 100644
--- a/web/apps/web-antd/src/views/ai/chat/index.vue
+++ b/web/apps/web-antd/src/views/ai/chat/index.vue
@@ -13,14 +13,10 @@ import {
   Row,
   Select,
 } from 'ant-design-vue';
-import {
-  createConversation,
-  fetchAIStream,
-  getConversations,
-  getMessages,
-} from '#/api/ai/chat';
-import {AiChatConversationModel} from "#/models/ai/chat_conversation";
-import {AiChatMessageModel} from "#/models/ai/chat_message";
+
+import { AiChatConversationModel } from '#/models/ai/chat_conversation';
+import { AiChatMessageModel } from '#/models/ai/chat_message';
+import { fetchAIStream } from '#/api/ai/chat';
 
 const aiChatConversation = new AiChatConversationModel();
 const aiChatMessageModel = new AiChatMessageModel();
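
Note on the event-loop handling in stream() above: driving an async generator from a synchronous Django view generalizes to a small standalone helper. The sketch below is illustrative only (the helper name iter_over_async is not part of this change) and assumes nothing beyond the Python standard library:

    import asyncio
    from typing import AsyncIterator, Iterator, TypeVar

    T = TypeVar("T")

    def iter_over_async(agen: AsyncIterator[T]) -> Iterator[T]:
        """Drive an async generator from synchronous code on a private event loop.

        Mirrors the approach in ChatMessageViewSet.stream(): a fresh loop is
        created so the worker thread never reuses a loop owned by other code,
        and the loop is always closed to avoid leaking resources.
        """
        loop = asyncio.new_event_loop()
        try:
            asyncio.set_event_loop(loop)
            while True:
                try:
                    # __anext__() returns a coroutine; run it to completion
                    yield loop.run_until_complete(agen.__anext__())
                except StopAsyncIteration:
                    break
        finally:
            loop.close()

With such a helper, the loop inside generate() reduces to a plain `for chunk in iter_over_async(llm.stream_chat(context)):`.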
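
For manually exercising the new ai/chat_message/stream/ endpoint, the SSE frames can be reassembled on the client. A minimal sketch using the requests library; the URL is composed from the API_BASE above, and the Bearer auth header is an assumption (the real header format comes from formatToken in the frontend):

    import requests

    def consume_stream(base_url: str, token: str, content: str, conversation_id: int) -> str:
        """POST to the streaming endpoint and reassemble the reply from SSE frames."""
        parts = []
        with requests.post(
            f"{base_url}/api/admin/ai/chat_message/stream/",
            json={
                "content": content,
                "platform": "deepseek",
                "conversation_id": conversation_id,
            },
            headers={"Authorization": f"Bearer {token}"},  # assumed auth scheme
            stream=True,
            timeout=60,
        ) as res:
            res.raise_for_status()
            for line in res.iter_lines(decode_unicode=True):
                # Each frame is "data: <chunk>"; blank lines separate frames.
                if line and line.startswith("data: "):
                    parts.append(line[len("data: "):])
        return "".join(parts)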