diff --git a/backend/ai/chat.py b/backend/ai/chat.py
new file mode 100644
index 0000000..0431b2e
--- /dev/null
+++ b/backend/ai/chat.py
@@ -0,0 +1,26 @@
+from channels.generic.websocket import AsyncWebsocketConsumer
+import json
+from ai.langchain_client import get_ai_reply_stream
+from ai.utils import get_first_available_ai_config
+
+
+class ChatConsumer(AsyncWebsocketConsumer):
+    async def connect(self):
+        await self.accept()
+
+    async def disconnect(self, close_code):
+        pass
+
+    async def receive(self, text_data):
+        data = json.loads(text_data)
+        user_message = data.get("message", "")
+
+        model, api_key, api_base = await get_first_available_ai_config()
+
+        async def send_chunk(chunk):
+            await self.send(text_data=json.dumps({"is_streaming": True, "message": chunk}))
+
+        await get_ai_reply_stream(user_message, send_chunk, model_name=model, api_key=api_key, api_base=api_base)
+
+        # End-of-stream marker so the client knows the reply is complete
+        await self.send(text_data=json.dumps({"done": True}))
\ No newline at end of file
diff --git a/backend/ai/langchain_client.py b/backend/ai/langchain_client.py
new file mode 100644
index 0000000..aa215f6
--- /dev/null
+++ b/backend/ai/langchain_client.py
@@ -0,0 +1,25 @@
+from langchain.schema import HumanMessage
+
+from langchain_core.callbacks import AsyncCallbackHandler
+from langchain_community.chat_models import ChatOpenAI
+
+
+class MyHandler(AsyncCallbackHandler):
+    def __init__(self, send_func):
+        super().__init__()
+        self.send_func = send_func
+
+    async def on_llm_new_token(self, token: str, **kwargs):
+        await self.send_func(token)
+
+async def get_ai_reply_stream(message: str, send_func, api_key, api_base, model_name):
+    # Attach the streaming callback at construction time
+    chat = ChatOpenAI(
+        openai_api_key=api_key,
+        openai_api_base=api_base,
+        model_name=model_name,
+        temperature=0.7,
+        streaming=True,
+        callbacks=[MyHandler(send_func)]
+    )
+    await chat.ainvoke([HumanMessage(content=message)])
\ No newline at end of file
diff --git a/backend/ai/models.py b/backend/ai/models.py
index 26c487f..c2d3c9a 100644
--- a/backend/ai/models.py
+++ b/backend/ai/models.py
@@ -218,14 +218,12 @@ class ChatRole(CoreModel):
         blank=True,
         related_name="roles",
         verbose_name="关联的知识库",
-        db_comment="关联的知识库"
     )
     tools = models.ManyToManyField(
         'Tool',
         blank=True,
         related_name="roles",
         verbose_name="关联的工具",
-        db_comment="关联的工具"
     )
 
     class Meta:
diff --git a/backend/ai/routing.py b/backend/ai/routing.py
new file mode 100644
index 0000000..ba82f67
--- /dev/null
+++ b/backend/ai/routing.py
@@ -0,0 +1,7 @@
+from django.urls import re_path
+
+from ai.chat import ChatConsumer
+
+websocket_urlpatterns = [
+    re_path(r'ws/chat/$', ChatConsumer.as_asgi()),
+]
\ No newline at end of file
diff --git a/backend/ai/utils.py b/backend/ai/utils.py
new file mode 100644
index 0000000..240d793
--- /dev/null
+++ b/backend/ai/utils.py
@@ -0,0 +1,11 @@
+from ai.models import AIModel
+from utils.models import CommonStatus
+from asgiref.sync import sync_to_async
+
+@sync_to_async
+def get_first_available_ai_config():
+    # Take the first enabled config; select_related joins the key row in one query
+    ai = AIModel.objects.filter(status=CommonStatus.ENABLED).select_related('key').first()
+    if not ai:
+        raise Exception('没有可用的AI配置')
+    return ai.model, ai.key.api_key, ai.key.url
\ No newline at end of file
diff --git a/backend/backend/asgi.py b/backend/backend/asgi.py
index 6aa1b52..8421d44 100644
--- a/backend/backend/asgi.py
+++ b/backend/backend/asgi.py
@@ -1,16 +1,17 @@
-"""
-ASGI config for backend project.
-
-It exposes the ASGI callable as a module-level variable named ``application``.
-
-For more information on this file, see
-https://docs.djangoproject.com/en/5.2/howto/deployment/asgi/
-"""
-
 import os
-
 from django.core.asgi import get_asgi_application
+from channels.routing import ProtocolTypeRouter, URLRouter
 
 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
 
-application = get_asgi_application()
+# Import routing only after get_asgi_application() has set Django up,
+# avoiding AppRegistryNotReady when ai.routing imports consumers/models
+def get_websocket_urlpatterns():
+    from ai.routing import websocket_urlpatterns
+    return websocket_urlpatterns
+
+application = ProtocolTypeRouter({
+    "http": get_asgi_application(),
+    "websocket": URLRouter(
+        get_websocket_urlpatterns()
+    ),
+})
\ No newline at end of file
diff --git a/backend/backend/settings.py b/backend/backend/settings.py
index 2a9681c..988ccec 100644
--- a/backend/backend/settings.py
+++ b/backend/backend/settings.py
@@ -53,6 +53,7 @@ INSTALLED_APPS = [
     'django_filters',
     'corsheaders',
     'rest_framework.authtoken',
+    'channels',
     "system",
     "ai",
 ]
@@ -231,5 +232,15 @@ LOGGING = {
     }
 }
 
+ASGI_APPLICATION = 'backend.asgi.application'
+
+
+# Simple in-memory channel layer (single process only; use Redis in production)
+CHANNEL_LAYERS = {
+    'default': {
+        'BACKEND': 'channels.layers.InMemoryChannelLayer'
+    }
+}
+
 if os.path.exists(os.path.join(BASE_DIR, 'backend/local_settings.py')):
     from backend.local_settings import *
\ No newline at end of file
diff --git a/backend/requirements.txt b/backend/requirements.txt
index 34091e2..b89c8da 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -13,4 +13,10 @@
 eventlet==0.40.0
 goofish_api==0.0.6
 flower==2.0.1
 gunicorn==23.0.0
-django_redis==6.0.0
\ No newline at end of file
+django_redis==6.0.0
+django-ninja==1.4.3
+openai==1.95.0
+channels==4.2.0
+daphne==4.2.1
+langchain==0.3.26
+langchain-community==0.3.27
\ No newline at end of file
diff --git a/web/apps/web-antd/src/views/ai/chat/index.vue b/web/apps/web-antd/src/views/ai/chat/index.vue
new file mode 100644
index 0000000..2e32cf5
--- /dev/null
+++ b/web/apps/web-antd/src/views/ai/chat/index.vue
@@ -0,0 +1,285 @@
+
+
+
+
+
+
+🧑: {{ msg.content }}
+🤖: {{
msg.content }}
+
+AI 正在思考...
+
+
+
+发送
+
+
+
+
diff --git a/web/apps/web-antd/vite.config.mts b/web/apps/web-antd/vite.config.mts
index 5146e4a..b62f598 100644
--- a/web/apps/web-antd/vite.config.mts
+++ b/web/apps/web-antd/vite.config.mts
@@ -1,5 +1,3 @@
-import * as console from 'node:console';
-
 import { defineConfig } from '@vben/vite-config';
 import { loadEnv } from 'vite';
 
@@ -9,8 +7,8 @@ import vitePluginOss from './plugins/vite-plugin-oss.mjs';
 
 export default defineConfig(async ({ mode }) => {
   // eslint-disable-next-line n/prefer-global/process
   const env = loadEnv(mode, process.cwd());
-  // 这样获取
-  const backendUrl = env.VITE_BACKEND_URL;
+  // Read from env, falling back to a sensible local default
+  const backendUrl = env.VITE_BACKEND_URL || 'http://localhost:8000';
   // 判断是否为构建模式
   const isBuild = mode === 'production';
@@ -28,6 +26,11 @@ export default defineConfig(async ({ mode }) => {
         target: backendUrl,
         changeOrigin: true,
       },
+      '/ws': {
+        target: backendUrl,
+        changeOrigin: true,
+        ws: true, // enable WebSocket proxying
+      },
     },
   },
   plugins: [