Merge remote-tracking branch 'origin/main' into feat/agents-new

Author: Vaayne
Date: 2025-09-28 23:34:33 +08:00
22 changed files with 178 additions and 148 deletions

View File

@@ -2,8 +2,8 @@ name: Auto I18N
 env:
   API_KEY: ${{ secrets.TRANSLATE_API_KEY }}
-  MODEL: ${{ vars.MODEL || 'deepseek/deepseek-v3.1'}}
-  BASE_URL: ${{ vars.BASE_URL || 'https://api.ppinfra.com/openai'}}
+  MODEL: ${{ vars.AUTO_I18N_MODEL || 'deepseek/deepseek-v3.1'}}
+  BASE_URL: ${{ vars.AUTO_I18N_BASE_URL || 'https://api.ppinfra.com/openai'}}
 on:
   pull_request:

View File

@@ -99,9 +99,9 @@ jobs:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           NODE_OPTIONS: --max-old-space-size=8192
           MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
-          MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
-          RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
-          RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
+          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
+          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
+          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
       - name: Build Mac
         if: matrix.os == 'macos-latest'
@@ -110,15 +110,15 @@ jobs:
         env:
           CSC_LINK: ${{ secrets.CSC_LINK }}
           CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}
-          APPLE_ID: ${{ vars.APPLE_ID }}
-          APPLE_APP_SPECIFIC_PASSWORD: ${{ vars.APPLE_APP_SPECIFIC_PASSWORD }}
-          APPLE_TEAM_ID: ${{ vars.APPLE_TEAM_ID }}
+          APPLE_ID: ${{ secrets.APPLE_ID }}
+          APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
+          APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           NODE_OPTIONS: --max-old-space-size=8192
           MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
-          MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
-          RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
-          RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
+          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
+          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
+          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
       - name: Build Windows
         if: matrix.os == 'windows-latest'
@@ -128,9 +128,9 @@ jobs:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           NODE_OPTIONS: --max-old-space-size=8192
           MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
-          MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
-          RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
-          RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
+          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
+          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
+          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
       - name: Rename artifacts with nightly format
         shell: bash

View File

@@ -86,9 +86,9 @@ jobs:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           NODE_OPTIONS: --max-old-space-size=8192
           MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
-          MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
-          RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
-          RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
+          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
+          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
+          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
       - name: Build Mac
         if: matrix.os == 'macos-latest'
@@ -98,15 +98,15 @@ jobs:
         env:
           CSC_LINK: ${{ secrets.CSC_LINK }}
           CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}
-          APPLE_ID: ${{ vars.APPLE_ID }}
-          APPLE_APP_SPECIFIC_PASSWORD: ${{ vars.APPLE_APP_SPECIFIC_PASSWORD }}
-          APPLE_TEAM_ID: ${{ vars.APPLE_TEAM_ID }}
+          APPLE_ID: ${{ secrets.APPLE_ID }}
+          APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
+          APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           NODE_OPTIONS: --max-old-space-size=8192
           MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
-          MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
-          RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
-          RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
+          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
+          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
+          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
       - name: Build Windows
         if: matrix.os == 'windows-latest'
@@ -116,9 +116,9 @@ jobs:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           NODE_OPTIONS: --max-old-space-size=8192
           MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
-          MAIN_VITE_MINERU_API_KEY: ${{ vars.MAIN_VITE_MINERU_API_KEY }}
-          RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
-          RENDERER_VITE_PPIO_APP_SECRET: ${{ vars.RENDERER_VITE_PPIO_APP_SECRET }}
+          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
+          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
+          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}
       - name: Release
         uses: ncipollo/release-action@v1

View File

@@ -1,5 +1,5 @@
 diff --git a/dist/index.mjs b/dist/index.mjs
-index 110f37ec18c98b1d55ae2b73cc716194e6f9094d..3ea0fadd783f334db71266e45babdcce11076974 100644
+index 110f37ec18c98b1d55ae2b73cc716194e6f9094d..17e109b7778cbebb904f1919e768d21a2833d965 100644
 --- a/dist/index.mjs
 +++ b/dist/index.mjs
 @@ -448,7 +448,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -7,7 +7,7 @@ index 110f37ec18c98b1d55ae2b73cc716194e6f9094d..3ea0fadd783f334db71266e45babdcce
  // src/get-model-path.ts
  function getModelPath(modelId) {
 -  return modelId.includes("/") ? modelId : `models/${modelId}`;
-+  return `models/${modelId}`;
++  return modelId?.includes("models/") ? modelId : `models/${modelId}`;
  }
  // src/google-generative-ai-options.ts
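
For reference, a self-contained sketch of the patched helper's behavior (typed here for clarity; the bundled dist/index.mjs above is plain JavaScript):

```ts
// Patched getModelPath from the @ai-sdk/google patch above: ids that already
// carry the "models/" prefix pass through, everything else gets prefixed.
function getModelPath(modelId: string): string {
  return modelId?.includes('models/') ? modelId : `models/${modelId}`
}

// Examples (hypothetical ids):
getModelPath('gemini-2.0-flash')        // => 'models/gemini-2.0-flash'
getModelPath('models/gemini-2.0-flash') // => 'models/gemini-2.0-flash'
```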

View File

@@ -125,59 +125,7 @@ afterSign: scripts/notarize.js
 artifactBuildCompleted: scripts/artifact-build-completed.js
 releaseInfo:
   releaseNotes: |
-    <!--LANG:en-->
-    🚀 New Features:
-    - Refactored AI core engine for more efficient and stable content generation
-    - Added support for multiple AI model providers: CherryIN, AiOnly
-    - Added API server functionality for external application integration
-    - Added PaddleOCR document recognition for enhanced document processing
-    - Added Anthropic OAuth authentication support
-    - Added data storage space limit notifications
-    - Added font settings for global and code fonts customization
-    - Added auto-copy feature after translation completion
-    - Added keyboard shortcuts: rename topic, edit last message, etc.
-    - Added text attachment preview for viewing file contents in messages
-    - Added custom window control buttons (minimize, maximize, close)
-    - Support for Qwen long-text (qwen-long) and document analysis (qwen-doc) models with native file uploads
-    - Support for Qwen image recognition models (Qwen-Image)
-    - Added iFlow CLI support
-    - Converted knowledge base and web search to tool-calling approach for better flexibility
-    🎨 UI Improvements & Bug Fixes:
-    - Integrated HeroUI and Tailwind CSS framework
-    - Optimized message notification styles with unified toast component
-    - Moved free models to bottom with fixed position for easier access
-    - Refactored quick panel and input bar tools for smoother operation
-    - Optimized responsive design for navbar and sidebar
-    - Improved scrollbar component with horizontal scrolling support
-    - Fixed multiple translation issues: paste handling, file processing, state management
-    - Various UI optimizations and bug fixes
-    <!--LANG:zh-CN-->
-    🚀 新功能:
-    - 重构 AI 核心引擎,提供更高效稳定的内容生成
-    - 新增多个 AI 模型提供商支持:CherryIN、AiOnly
-    - 新增 API 服务器功能,支持外部应用集成
-    - 新增 PaddleOCR 文档识别,增强文档处理能力
-    - 新增 Anthropic OAuth 认证支持
-    - 新增数据存储空间限制提醒
-    - 新增字体设置,支持全局字体和代码字体自定义
-    - 新增翻译完成后自动复制功能
-    - 新增键盘快捷键:重命名主题、编辑最后一条消息等
-    - 新增文本附件预览,可查看消息中的文件内容
-    - 新增自定义窗口控制按钮(最小化、最大化、关闭)
-    - 支持通义千问长文本(qwen-long)和文档分析(qwen-doc)模型原生文件上传
-    - 支持通义千问图像识别模型(Qwen-Image)
-    - 新增 iFlow CLI 支持
-    - 知识库和网页搜索转换为工具调用方式,提升灵活性
-    🎨 界面改进与问题修复:
-    - 集成 HeroUI 和 Tailwind CSS 框架
-    - 优化消息通知样式,统一 toast 组件
-    - 免费模型移至底部固定位置,便于访问
-    - 重构快捷面板和输入栏工具,操作更流畅
-    - 优化导航栏和侧边栏响应式设计
-    - 改进滚动条组件,支持水平滚动
-    - 修复多个翻译问题:粘贴处理、文件处理、状态管理
-    - 各种界面优化和问题修复
-    <!--LANG:END-->
+    Optimized note-taking feature, now able to quickly rename by modifying the title
+    Fixed issue where CherryAI free model could not be used
+    Fixed issue where VertexAI proxy address could not be called normally
+    Fixed issue where built-in tools from service providers could not be called normally

View File

@@ -34,6 +34,10 @@ export default defineConfig({
         output: {
           manualChunks: undefined, // 彻底禁用代码分割 - 返回 null 强制单文件打包
           inlineDynamicImports: true // 内联所有动态导入,这是关键配置
+        },
+        onwarn(warning, warn) {
+          if (warning.code === 'COMMONJS_VARIABLE_IN_ESM') return
+          warn(warning)
         }
       },
       sourcemap: isDev
@@ -112,6 +116,10 @@ export default defineConfig({
           selectionToolbar: resolve(__dirname, 'src/renderer/selectionToolbar.html'),
           selectionAction: resolve(__dirname, 'src/renderer/selectionAction.html'),
           traceWindow: resolve(__dirname, 'src/renderer/traceWindow.html')
+        },
+        onwarn(warning, warn) {
+          if (warning.code === 'COMMONJS_VARIABLE_IN_ESM') return
+          warn(warning)
         }
       }
     },
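
Both hunks register the same Rollup onwarn hook, silencing the COMMONJS_VARIABLE_IN_ESM warning that the single-file bundling setup triggers. A minimal sketch of that filter as a shared helper (the helper name and the loose typing are illustrative; the config above keeps it inline):

```ts
// Illustrative helper: suppress Rollup's COMMONJS_VARIABLE_IN_ESM warning,
// forward every other warning to the default handler.
type WarnLike = { code?: string; message?: string }

export function silenceCommonJsEsmWarning(warning: WarnLike, warn: (warning: WarnLike) => void): void {
  if (warning.code === 'COMMONJS_VARIABLE_IN_ESM') return
  warn(warning)
}

// Usage inside rollupOptions (same shape as the config above):
// rollupOptions: { output: { ... }, onwarn: silenceCommonJsEsmWarning }
```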

View File

@@ -261,22 +261,39 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
         return params
       }
 
-      context.mcpTools = params.tools
+      // 分离 provider-defined 和其他类型的工具
+      const providerDefinedTools: ToolSet = {}
+      const promptTools: ToolSet = {}
+
+      for (const [toolName, tool] of Object.entries(params.tools as ToolSet)) {
+        if (tool.type === 'provider-defined') {
+          // provider-defined 类型的工具保留在 tools 参数中
+          providerDefinedTools[toolName] = tool
+        } else {
+          // 其他工具转换为 prompt 模式
+          promptTools[toolName] = tool
+        }
+      }
 
-      // 构建系统提示符
+      // 只有当有非 provider-defined 工具时才保存到 context
+      if (Object.keys(promptTools).length > 0) {
+        context.mcpTools = promptTools
+      }
+
+      // 构建系统提示符(只包含非 provider-defined 工具)
       const userSystemPrompt = typeof params.system === 'string' ? params.system : ''
-      const systemPrompt = buildSystemPrompt(userSystemPrompt, params.tools)
+      const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools)
 
       let systemMessage: string | null = systemPrompt
       if (config.createSystemMessage) {
         // 🎯 如果用户提供了自定义处理函数,使用它
         systemMessage = config.createSystemMessage(systemPrompt, params, context)
       }
 
-      // 移除 tools,改为 prompt 模式
+      // 保留 provider-defined tools,移除其他 tools
       const transformedParams = {
         ...params,
         ...(systemMessage ? { system: systemMessage } : {}),
-        tools: undefined
+        tools: Object.keys(providerDefinedTools).length > 0 ? providerDefinedTools : undefined
       }
       context.originalParams = transformedParams
       return transformedParams
@@ -285,8 +302,9 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
       let textBuffer = ''
       // let stepId = ''
 
+      // 如果没有需要 prompt 模式处理的工具,直接返回原始流
       if (!context.mcpTools) {
-        throw new Error('No tools available')
+        return new TransformStream()
       }
 
       // 从 context 中获取或初始化 usage 累加器
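
A condensed sketch of the splitting step introduced above, assuming ToolSet is imported from the ai package as in the plugin source (the plugin performs this inline on params.tools):

```ts
import type { ToolSet } from 'ai'

// Split a ToolSet into provider-defined tools (kept in the `tools` parameter)
// and everything else (converted to prompt-based tool use), mirroring the hunk above.
function splitTools(tools: ToolSet): { providerDefinedTools: ToolSet; promptTools: ToolSet } {
  const providerDefinedTools: ToolSet = {}
  const promptTools: ToolSet = {}
  for (const [name, tool] of Object.entries(tools)) {
    if (tool.type === 'provider-defined') {
      providerDefinedTools[name] = tool
    } else {
      promptTools[name] = tool
    }
  }
  return { providerDefinedTools, promptTools }
}
```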

View File

@@ -1,6 +1,7 @@
 import { anthropic } from '@ai-sdk/anthropic'
 import { google } from '@ai-sdk/google'
 import { openai } from '@ai-sdk/openai'
+import { InferToolInput, InferToolOutput } from 'ai'
 
 import { ProviderOptionsMap } from '../../../options/types'
 import { OpenRouterSearchConfig } from './openrouter'
@@ -58,24 +59,31 @@ export const DEFAULT_WEB_SEARCH_CONFIG: WebSearchPluginConfig = {
 export type WebSearchToolOutputSchema = {
   // Anthropic 工具 - 手动定义
-  anthropicWebSearch: Array<{
-    url: string
-    title: string
-    pageAge: string | null
-    encryptedContent: string
-    type: string
-  }>
+  anthropic: InferToolOutput<ReturnType<typeof anthropic.tools.webSearch_20250305>>
 
   // OpenAI 工具 - 基于实际输出
-  openaiWebSearch: {
+  // TODO: 上游定义不规范,是unknown
+  // openai: InferToolOutput<ReturnType<typeof openai.tools.webSearch>>
+  openai: {
+    status: 'completed' | 'failed'
+  }
+  'openai-chat': {
     status: 'completed' | 'failed'
   }
 
   // Google 工具
-  googleSearch: {
+  // TODO: 上游定义不规范,是unknown
+  // google: InferToolOutput<ReturnType<typeof google.tools.googleSearch>>
+  google: {
     webSearchQueries?: string[]
     groundingChunks?: Array<{
       web?: { uri: string; title: string }
     }>
   }
 }
+
+export type WebSearchToolInputSchema = {
+  anthropic: InferToolInput<ReturnType<typeof anthropic.tools.webSearch_20250305>>
+  openai: InferToolInput<ReturnType<typeof openai.tools.webSearch>>
+  google: InferToolInput<ReturnType<typeof google.tools.googleSearch>>
+  'openai-chat': InferToolInput<ReturnType<typeof openai.tools.webSearchPreview>>
+}
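
For reference, a minimal sketch of how the InferToolInput / InferToolOutput helpers used above derive types from a tool definition (the tool here is hypothetical; the real schemas come from the provider packages):

```ts
import { tool, type InferToolInput, type InferToolOutput } from 'ai'
import { z } from 'zod'

// Hypothetical tool, only to illustrate the type helpers used in the hunk above.
const demoSearch = tool({
  description: 'Demo search tool',
  inputSchema: z.object({ query: z.string() }),
  execute: async ({ query }) => ({ hits: [query] })
})

type DemoInput = InferToolInput<typeof demoSearch>   // { query: string }
type DemoOutput = InferToolOutput<typeof demoSearch> // { hits: string[] }
```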

View File

@@ -178,14 +178,13 @@ export class AiSdkToChunkAdapter {
           final.reasoningContent += chunk.text || ''
           this.onChunk({
             type: ChunkType.THINKING_DELTA,
-            text: final.reasoningContent || '',
-            thinking_millsec: (chunk.providerMetadata?.metadata?.thinking_millsec as number) || 0
+            text: final.reasoningContent || ''
           })
           break
         case 'reasoning-end':
           this.onChunk({
             type: ChunkType.THINKING_COMPLETE,
-            text: (chunk.providerMetadata?.metadata?.thinking_content as string) || final.reasoningContent
+            text: final.reasoningContent || ''
           })
           final.reasoningContent = ''
           break

View File

@@ -5,7 +5,6 @@ import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
 import { Assistant } from '@renderer/types'
 
 import { AiSdkMiddlewareConfig } from '../middleware/AiSdkMiddlewareBuilder'
-import reasoningTimePlugin from './reasoningTimePlugin'
 import { searchOrchestrationPlugin } from './searchOrchestrationPlugin'
 import { createTelemetryPlugin } from './telemetryPlugin'
@@ -39,9 +38,9 @@ export function buildPlugins(
   }
 
   // 3. 推理模型时添加推理插件
-  if (middlewareConfig.enableReasoning) {
-    plugins.push(reasoningTimePlugin)
-  }
+  // if (middlewareConfig.enableReasoning) {
+  //   plugins.push(reasoningTimePlugin)
+  // }
 
   // 4. 启用Prompt工具调用时添加工具插件
   if (middlewareConfig.isPromptToolUse) {

Binary file not shown (image added, 1.6 KiB).

View File

@@ -1804,5 +1804,19 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
       provider: 'aionly',
       group: 'gemini'
     }
+  ],
+  longcat: [
+    {
+      id: 'LongCat-Flash-Chat',
+      name: 'LongCat Flash Chat',
+      provider: 'longcat',
+      group: 'LongCat'
+    },
+    {
+      id: 'LongCat-Flash-Thinking',
+      name: 'LongCat Flash Thinking',
+      provider: 'longcat',
+      group: 'LongCat'
+    }
   ]
 }

View File

@@ -27,6 +27,7 @@ import InfiniProviderLogo from '@renderer/assets/images/providers/infini.png'
 import JinaProviderLogo from '@renderer/assets/images/providers/jina.png'
 import LanyunProviderLogo from '@renderer/assets/images/providers/lanyun.png'
 import LMStudioProviderLogo from '@renderer/assets/images/providers/lmstudio.png'
+import LongCatProviderLogo from '@renderer/assets/images/providers/longcat.png'
 import MinimaxProviderLogo from '@renderer/assets/images/providers/minimax.png'
 import MistralProviderLogo from '@renderer/assets/images/providers/mistral.png'
 import ModelScopeProviderLogo from '@renderer/assets/images/providers/modelscope.png'
@@ -630,6 +631,16 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
     models: SYSTEM_MODELS['poe'],
     isSystem: true,
     enabled: false
+  },
+  longcat: {
+    id: 'longcat',
+    name: 'LongCat',
+    type: 'openai',
+    apiKey: '',
+    apiHost: 'https://api.longcat.chat/openai',
+    models: SYSTEM_MODELS.longcat,
+    isSystem: true,
+    enabled: false
   }
 } as const
@@ -692,7 +703,8 @@ export const PROVIDER_LOGO_MAP: AtLeast<SystemProviderId, string> = {
   'new-api': NewAPIProviderLogo,
   'aws-bedrock': AwsProviderLogo,
   poe: 'poe', // use svg icon component
-  aionly: AiOnlyProviderLogo
+  aionly: AiOnlyProviderLogo,
+  longcat: LongCatProviderLogo
 } as const
 
 export function getProviderLogo(providerId: string) {
@@ -1298,6 +1310,17 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
       docs: 'https://www.aiionly.com/document',
       models: 'https://www.aiionly.com'
     }
+  },
+  longcat: {
+    api: {
+      url: 'https://api.longcat.chat/openai'
+    },
+    websites: {
+      official: 'https://longcat.chat',
+      apiKey: 'https://longcat.chat/platform/api_keys',
+      docs: 'https://longcat.chat/platform/docs/zh/',
+      models: 'https://longcat.chat/platform/docs/zh/APIDocs.html'
+    }
   }
 }
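
The new provider is registered with type 'openai', so it should be reachable through any OpenAI-compatible client. A minimal sketch only (this is not how Cherry Studio wires providers internally; the base URL and model id are taken from the config above, the API key is a placeholder, and path normalization such as appending /v1 depends on the app's provider layer):

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible'
import { generateText } from 'ai'

// Sketch only: the app resolves providers through its registry, not inline like this.
const longcat = createOpenAICompatible({
  name: 'longcat',
  baseURL: 'https://api.longcat.chat/openai', // apiHost from SYSTEM_PROVIDERS_CONFIG above
  apiKey: process.env.LONGCAT_API_KEY ?? '' // placeholder credential
})

async function main() {
  const { text } = await generateText({
    model: longcat('LongCat-Flash-Chat'), // model id from SYSTEM_MODELS.longcat
    prompt: 'Hello!'
  })
  console.log(text)
}

main()
```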

View File

@@ -5,7 +5,7 @@ import { useSettings } from '@renderer/hooks/useSettings'
 import { useTemporaryValue } from '@renderer/hooks/useTemporaryValue'
 import { MessageBlockStatus, type ThinkingMessageBlock } from '@renderer/types/newMessage'
 import { Collapse, message as antdMessage, Tooltip } from 'antd'
-import { memo, useCallback, useEffect, useMemo, useState } from 'react'
+import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'
 import { useTranslation } from 'react-i18next'
 import styled from 'styled-components'
@@ -105,30 +105,37 @@ const ThinkingBlock: React.FC<Props> = ({ block }) => {
 const ThinkingTimeSeconds = memo(
   ({ blockThinkingTime, isThinking }: { blockThinkingTime: number; isThinking: boolean }) => {
     const { t } = useTranslation()
-    // const [thinkingTime, setThinkingTime] = useState(blockThinkingTime || 0)
-    // FIXME: 这里统计的和请求处统计的有一定误差
-    // useEffect(() => {
-    //   let timer: NodeJS.Timeout | null = null
-    //   if (isThinking) {
-    //     timer = setInterval(() => {
-    //       setThinkingTime((prev) => prev + 100)
-    //     }, 100)
-    //   } else if (timer) {
-    //     // 立即清除计时器
-    //     clearInterval(timer)
-    //     timer = null
-    //   }
-    //   return () => {
-    //     if (timer) {
-    //       clearInterval(timer)
-    //       timer = null
-    //     }
-    //   }
-    // }, [isThinking])
-
-    const thinkingTimeSeconds = useMemo(() => (blockThinkingTime / 1000).toFixed(1), [blockThinkingTime])
+    const [displayTime, setDisplayTime] = useState(blockThinkingTime)
+    const timer = useRef<NodeJS.Timeout | null>(null)
+
+    useEffect(() => {
+      if (isThinking) {
+        if (!timer.current) {
+          timer.current = setInterval(() => {
+            setDisplayTime((prev) => prev + 100)
+          }, 100)
+        }
+      } else {
+        if (timer.current) {
+          clearInterval(timer.current)
+          timer.current = null
+        }
+        setDisplayTime(blockThinkingTime)
+      }
+
+      return () => {
+        if (timer.current) {
+          clearInterval(timer.current)
+          timer.current = null
+        }
+      }
+    }, [isThinking, blockThinkingTime])
+
+    const thinkingTimeSeconds = useMemo(
+      () => ((displayTime < 1000 ? 100 : displayTime) / 1000).toFixed(1),
+      [displayTime]
+    )
 
     return isThinking
       ? t('chat.thinking', {
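
A standalone sketch of the display rule encoded in the memo above (and matched by the updated test expectations further down): durations under one second are floored so the label never reads 0.0s.

```ts
// Sketch of the formatting rule from ThinkingTimeSeconds above.
const formatThinkingSeconds = (ms: number): string => `${((ms < 1000 ? 100 : ms) / 1000).toFixed(1)}s`

formatThinkingSeconds(0)        // => '0.1s'
formatThinkingSeconds(250)      // => '0.1s'
formatThinkingSeconds(86400000) // => '86400.0s' (1 day)
```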

View File

@@ -235,13 +235,12 @@ describe('ThinkingBlock', () => {
       renderThinkingBlock(thinkingBlock)
 
       const activeTimeText = getThinkingTimeText()
-      expect(activeTimeText).toHaveTextContent('1.0s')
       expect(activeTimeText).toHaveTextContent('Thinking...')
     })
 
     it('should handle extreme thinking times correctly', () => {
       const testCases = [
-        { thinking_millsec: 0, expectedTime: '0.0s' },
+        { thinking_millsec: 0, expectedTime: '0.1s' }, // New logic: values < 1000ms display as 0.1s
         { thinking_millsec: 86400000, expectedTime: '86400.0s' }, // 1 day
         { thinking_millsec: 259200000, expectedTime: '259200.0s' } // 3 days
       ]

View File

@@ -58,7 +58,7 @@ const MessageMcpTool: FC<Props> = ({ block }) => {
   const [progress, setProgress] = useState<number>(0)
   const { setTimeoutTimer } = useTimer()
 
-  const toolResponse = block.metadata?.rawMcpToolResponse
+  const toolResponse = block.metadata?.rawMcpToolResponse as MCPToolResponse
   const { id, tool, status, response } = toolResponse as MCPToolResponse
 
   const isPending = status === 'pending'

View File

@@ -37,12 +37,13 @@ const isAgentTool = (toolName: string) => {
 const ChooseTool = (toolResponse: NormalToolResponse): React.ReactNode | null => {
   let toolName = toolResponse.tool.name
+  const toolType = toolResponse.tool.type
   if (toolName.startsWith(prefix)) {
     toolName = toolName.slice(prefix.length)
     switch (toolName) {
       case 'web_search':
       case 'web_search_preview':
-        return <MessageWebSearchToolTitle toolResponse={toolResponse} />
+        return toolType === 'provider' ? null : <MessageWebSearchToolTitle toolResponse={toolResponse} />
       case 'knowledge_search':
         return <MessageKnowledgeSearchToolTitle toolResponse={toolResponse} />
       case 'memory_search':
@@ -58,7 +59,7 @@ const ChooseTool = (toolResponse: NormalToolResponse): React.ReactNode | null =>
 export default function MessageTool({ block }: Props) {
   // FIXME: 语义错误,这里已经不是 MCP tool 了,更改rawMcpToolResponse需要改用户数据, 所以暂时保留
-  const toolResponse = block.metadata?.rawMcpToolResponse
+  const toolResponse = block.metadata?.rawMcpToolResponse as NormalToolResponse
 
   if (!toolResponse) return null

View File

@@ -15,7 +15,7 @@ export const createThinkingCallbacks = (deps: ThinkingCallbacksDependencies) =>
   // 内部维护的状态
   let thinkingBlockId: string | null = null
-  let _thinking_millsec = 0
+  let thinking_millsec_now: number = 0
 
   return {
     onThinkingStart: async () => {
@@ -24,27 +24,27 @@ export const createThinkingCallbacks = (deps: ThinkingCallbacksDependencies) =>
           type: MessageBlockType.THINKING,
           content: '',
           status: MessageBlockStatus.STREAMING,
-          thinking_millsec: _thinking_millsec
+          thinking_millsec: 0
         }
         thinkingBlockId = blockManager.initialPlaceholderBlockId!
         blockManager.smartBlockUpdate(thinkingBlockId, changes, MessageBlockType.THINKING, true)
       } else if (!thinkingBlockId) {
         const newBlock = createThinkingBlock(assistantMsgId, '', {
           status: MessageBlockStatus.STREAMING,
-          thinking_millsec: _thinking_millsec
+          thinking_millsec: 0
         })
         thinkingBlockId = newBlock.id
         await blockManager.handleBlockTransition(newBlock, MessageBlockType.THINKING)
       }
+      thinking_millsec_now = performance.now()
     },
-    onThinkingChunk: async (text: string, thinking_millsec?: number) => {
-      _thinking_millsec = thinking_millsec || 0
+    onThinkingChunk: async (text: string) => {
       if (thinkingBlockId) {
         const blockChanges: Partial<MessageBlock> = {
           content: text,
-          status: MessageBlockStatus.STREAMING,
-          thinking_millsec: _thinking_millsec
+          status: MessageBlockStatus.STREAMING
+          // thinking_millsec: performance.now() - thinking_millsec_now
         }
         blockManager.smartBlockUpdate(thinkingBlockId, blockChanges, MessageBlockType.THINKING)
       }
@@ -52,14 +52,15 @@ export const createThinkingCallbacks = (deps: ThinkingCallbacksDependencies) =>
     onThinkingComplete: (finalText: string) => {
       if (thinkingBlockId) {
+        const now = performance.now()
         const changes: Partial<MessageBlock> = {
           content: finalText,
           status: MessageBlockStatus.SUCCESS,
-          thinking_millsec: _thinking_millsec
+          thinking_millsec: now - thinking_millsec_now
        }
         blockManager.smartBlockUpdate(thinkingBlockId, changes, MessageBlockType.THINKING, true)
         thinkingBlockId = null
-        _thinking_millsec = 0
+        thinking_millsec_now = 0
       } else {
         logger.warn(
           `[onThinkingComplete] Received thinking.complete but last block was not THINKING (was ${blockManager.lastBlockType}) or lastBlockId is null.`
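
The thinking duration is now measured locally with performance.now() rather than read from provider metadata. A stripped-down sketch of the timing pattern (the block-manager plumbing is omitted; names are illustrative):

```ts
// Sketch of the local timing pattern used above: start a clock when thinking
// begins and compute the elapsed milliseconds when it completes.
function createThinkingTimer() {
  let startedAt = 0

  return {
    onThinkingStart() {
      startedAt = performance.now()
    },
    onThinkingComplete(): number {
      const elapsedMs = performance.now() - startedAt
      startedAt = 0
      return elapsedMs // stored as thinking_millsec on the block
    }
  }
}
```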

View File

@@ -2570,6 +2570,7 @@ const migrateConfig = {
   '158': (state: RootState) => {
     try {
       state.llm.providers = state.llm.providers.filter((provider) => provider.id !== 'cherryin')
+      addProvider(state, 'longcat')
       return state
     } catch (error) {
       logger.error('migrate 158 error', error as Error)

View File

@@ -425,7 +425,10 @@ describe('streamCallback Integration Tests', () => {
       expect(thinkingBlock).toBeDefined()
       expect(thinkingBlock?.content).toBe('Final thoughts')
       expect(thinkingBlock?.status).toBe(MessageBlockStatus.SUCCESS)
-      expect((thinkingBlock as any)?.thinking_millsec).toBe(3000)
+      // thinking_millsec 现在是本地计算的,只验证它存在且是一个合理的数字
+      expect((thinkingBlock as any)?.thinking_millsec).toBeDefined()
+      expect(typeof (thinkingBlock as any)?.thinking_millsec).toBe('number')
+      expect((thinkingBlock as any)?.thinking_millsec).toBeGreaterThanOrEqual(0)
     })
 
     it('should handle tool call flow', async () => {

View File

@@ -334,7 +334,8 @@ export const SystemProviderIds = {
   voyageai: 'voyageai',
   'aws-bedrock': 'aws-bedrock',
   poe: 'poe',
-  aionly: 'aionly'
+  aionly: 'aionly',
+  longcat: 'longcat'
 } as const
 
 export type SystemProviderId = keyof typeof SystemProviderIds

View File

@@ -169,13 +169,13 @@ __metadata:
 "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch":
   version: 2.0.14
-  resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch::version=2.0.14&hash=c6aff2"
+  resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch::version=2.0.14&hash=351f1a"
   dependencies:
     "@ai-sdk/provider": "npm:2.0.0"
     "@ai-sdk/provider-utils": "npm:3.0.9"
   peerDependencies:
     zod: ^3.25.76 || ^4
-  checksum: 10c0/2a0a09debab8de0603243503ff5044bd3fff87d6c5de2d76d43839fa459cc85d5412b59ec63d0dcf1a6d6cab02882eb3c69f0f155129d0fc153bcde4deecbd32
+  checksum: 10c0/1ed5a0732a82b981d51f63c6241ed8ee94d5c29a842764db770305cfc2f49ab6e528cac438b5357fc7b02194104c7b76d4390a1dc1d019ace9c174b0849e0da6
   languageName: node
   linkType: hard