From 028d39a5e912e495c8f06f3ea32668d048a8215a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=F0=9F=8F=8E=EF=B8=8F=20Yumo?= Date: Fri, 7 Feb 2025 15:43:01 +0800 Subject: [PATCH] docs: add docs of integrate DeepSeek's model services --- docs/react/model-use-deepseek.en-US.md | 145 +++++++++++++++++++++++++ docs/react/model-use-deepseek.zh-CN.md | 145 +++++++++++++++++++++++++ 2 files changed, 290 insertions(+) create mode 100644 docs/react/model-use-deepseek.en-US.md create mode 100644 docs/react/model-use-deepseek.zh-CN.md diff --git a/docs/react/model-use-deepseek.en-US.md b/docs/react/model-use-deepseek.en-US.md new file mode 100644 index 00000000..586471bd --- /dev/null +++ b/docs/react/model-use-deepseek.en-US.md @@ -0,0 +1,145 @@ +--- +group: + title: Model Integration +title: DeepSeek +order: 1 +tag: Updated +--- + +This guide explains how to integrate DeepSeek's model services into applications built with Ant Design X. + +DeepSeek's model inference service supports an **OpenAI-compatible mode**. For details, refer to the official documentation: [DeepSeek - First API Call](https://api-docs.deepseek.com) + +### Obtaining Required Parameters + +- How to get baseURL - +- How to get API Key - + +## Integrating DeepSeek-R1 + +DeepSeek-R1: [`deepseek-reasoner`](https://api-docs.deepseek.com/guides/reasoning_model) is DeepSeek's reasoning model. Before outputting the final answer, the model first generates chain-of-thought content to improve answer accuracy. + +The `deepseek-reasoner` model output includes additional **reasoning content** (`reasoning_content`) at the same level as the final answer (`content`). During each conversation turn, the model outputs both the reasoning content and final answer. + +> Warning: 🔥 `dangerouslyAllowBrowser` carries security risks. Refer to the official [openai-node documentation](https://github.com/openai/openai-node?tab=readme-ov-file#requirements) for details. 
+ +```tsx +import { useXAgent, useXChat, Sender, Bubble } from '@ant-design/x'; +import OpenAI from 'openai'; +import React from 'react'; + +import type { GetProp } from 'antd'; + +const client = new OpenAI({ + baseURL: 'https://api.deepseek.com', + apiKey: process.env['DEEPSEEK_API_KEY'], + dangerouslyAllowBrowser: true, +}); + +const DeepSeekR1 = 'deepseek-reasoner'; + +interface YourMessage { + /** + * @description The content of model answer + */ + content: string; + /** + * @description The content of model reasoning + */ + reasoning_content: string; +} + +const Demo: React.FC = () => { + const [agent] = useXAgent({ + request: async (info, callbacks) => { + const { messages, message } = info; + + const { onSuccess, onUpdate, onError } = callbacks; + + // current message + console.log('message', message); + + // history messages + console.log('messages', messages); + + let content: string = ''; + let reasoning_content: string = ''; + + try { + const stream = await client.chat.completions.create({ + model: DeepSeekR1, + // if chat context is needed, modify the array + messages: [{ role: 'user', content: message?.content as string }], + // stream mode + stream: true, + }); + + for await (const chunk of stream) { + const { reasoning_content: deltaReasoningContent, content: deltaContent } = (chunk + .choices[0]?.delta || {}) as YourMessage; + + // update reasoning content + if (deltaReasoningContent) { + reasoning_content += deltaReasoningContent; + } + + // update content + if (deltaContent) { + content += deltaContent; + } + + onUpdate({ + content, + reasoning_content, + }); + } + + onSuccess({ + content, + reasoning_content, + }); + } catch (error) { + // handle error + // onError(); + } + }, + }); + + const { + // use to send message + onRequest, + // use to render messages + messages, + } = useXChat({ agent }); + + const onSubmit = (value: string) => { + onRequest({ content: value, reasoning_content: '' }); + }; + + console.log(messages); + + const items: 
GetProp<typeof Bubble.List, 'items'> = messages.map(({ message, id }) => ({
    // key is required, used to identify the message
    key: id,
    messageRender() {
      return (
        <>
          {/** render reasoning content */}
          <pre>{message.reasoning_content}</pre>
          {/** render content */}
          <pre>{message.content}</pre>
        </>
      );
    },
  }));

  return (
    <div>
      <Bubble.List items={items} />
      <Sender onSubmit={onSubmit} />
    </div>
+ ); +}; + +export default Demo; +``` diff --git a/docs/react/model-use-deepseek.zh-CN.md b/docs/react/model-use-deepseek.zh-CN.md new file mode 100644 index 00000000..f9cebb2e --- /dev/null +++ b/docs/react/model-use-deepseek.zh-CN.md @@ -0,0 +1,145 @@ +--- +group: + title: 模型接入 +title: DeepSeek +order: 1 +tag: Updated +--- + +这篇指南将介绍如何在使用 Ant Design X 搭建的应用中接入 DeepSeek 提供的模型服务。 + +DeepSeek 的模型推理服务支持「兼容 OpenAI 模式」。详见官方文档: [DeepSeek - 首次调用 API](https://api-docs.deepseek.com) + +### 相关参数获取 + +- 如何获取 baseURL - +- 如何获取 API Key - + +## 对接 DeepSeek-R1 推理模型 + +DeepSeek-R1:[`deepseek-reasoner`](https://api-docs.deepseek.com/guides/reasoning_model) 是 DeepSeek 推出的推理模型。在输出最终回答之前,模型会先输出一段思维链内容,以提升最终答案的准确性。 + +`deepseek-reasoner` 模型的输出字段增加了 思维链内容(reasoning_content),与最终回答(content)同级。在每一轮对话过程中,模型会输出思维链内容(reasoning_content)和最终回答(content)。 + +> 注意: 🔥 `dangerouslyAllowBrowser` 存在安全风险,对此 openai-node 的官方文档有详细的[说明](https://github.com/openai/openai-node?tab=readme-ov-file#requirements)。 + +```tsx +import { useXAgent, useXChat, Sender, Bubble } from '@ant-design/x'; +import OpenAI from 'openai'; +import React from 'react'; + +import type { GetProp } from 'antd'; + +const client = new OpenAI({ + baseURL: 'https://api.deepseek.com', + apiKey: process.env['DEEPSEEK_API_KEY'], + dangerouslyAllowBrowser: true, +}); + +const DeepSeekR1 = 'deepseek-reasoner'; + +interface YourMessage { + /** + * @description The content of model answer + */ + content: string; + /** + * @description The content of model reasoning + */ + reasoning_content: string; +} + +const Demo: React.FC = () => { + const [agent] = useXAgent({ + request: async (info, callbacks) => { + const { messages, message } = info; + + const { onSuccess, onUpdate, onError } = callbacks; + + // current message + console.log('message', message); + + // history messages + console.log('messages', messages); + + let content: string = ''; + let reasoning_content: string = ''; + + try { + const stream = await client.chat.completions.create({ + 
          model: DeepSeekR1,
          // if chat context is needed, modify the array
          messages: [{ role: 'user', content: message?.content as string }],
          // stream mode
          stream: true,
        });

        for await (const chunk of stream) {
          const { reasoning_content: deltaReasoningContent, content: deltaContent } = (chunk
            .choices[0]?.delta || {}) as YourMessage;

          // update reasoning content
          if (deltaReasoningContent) {
            reasoning_content += deltaReasoningContent;
          }

          // update content
          if (deltaContent) {
            content += deltaContent;
          }

          onUpdate({
            content,
            reasoning_content,
          });
        }

        onSuccess({
          content,
          reasoning_content,
        });
      } catch (error) {
        // handle error
        // onError();
      }
    },
  });

  const {
    // use to send message
    onRequest,
    // use to render messages
    messages,
  } = useXChat({ agent });

  const onSubmit = (value: string) => {
    onRequest({ content: value, reasoning_content: '' });
  };

  console.log(messages);

  const items: GetProp<typeof Bubble.List, 'items'> = messages.map(({ message, id }) => ({
    // key is required, used to identify the message
    key: id,
    messageRender() {
      return (
        <>
          {/** render reasoning content */}
          <pre>{message.reasoning_content}</pre>
          {/** render content */}
          <pre>{message.content}</pre>
        </>
      );
    },
  }));

  return (
    <div>
      <Bubble.List items={items} />
      <Sender onSubmit={onSubmit} />
    </div>
+ ); +}; + +export default Demo; +```