diff --git a/zh-CN/chat.json b/zh-CN/chat.json
index fa48c157..1b658ab0 100644
--- a/zh-CN/chat.json
+++ b/zh-CN/chat.json
@@ -4,7 +4,7 @@
"userRoleText": "用户",
"assistantRoleText": "助手",
"addMessageButtonText": "添加",
- "addMessageButtonText/toolTip": "在不触发预测的情况下将消息插入上下文中",
+ "addMessageButtonText/toolTip": "将消息插入上下文而不触发预测",
"sendMessageButtonText": "发送",
"sendMessageButtonText/toolTip": "将您的提示和对话历史发送给模型进行处理",
"ejectButtonText": "弹出",
@@ -13,26 +13,26 @@
"loadButtonText": "加载",
"advancedSegmentText": "高级",
"chatSegmentText": "聊天",
- "chatSidebarTitle": "聊天列表",
+ "chatSidebarTitle": "聊天记录",
"newChatButton": "新建聊天",
"newFolderButton": "新建文件夹",
- "viewModeLabel": "视图模式",
+ "viewModeLabel": "查看模式",
"noChatSelected": "未选择聊天",
"chatViewOptions": "聊天视图选项",
"uiControls/title": "显示设置",
- "noChatSelectedPlaceholder": "请选择一个聊天",
+ "noChatSelectedPlaceholder": "选择一个聊天",
"unnamedChat": "未命名聊天",
"emptyFolder": "文件夹为空",
- "tokenCount": "词元(token)数",
- "messageTokenCount": "输入词元(token)数",
- "tokenCount/hint": "消息中的词元(token)数量。使用当前选定模型的分词器计算。\n\n需要加载模型。",
- "messageTokenCount/hint": "消息中的词元(token)数量。使用当前选定模型的分词器计算。\n\n**不包括**附件中的词元(token)估计值。",
+ "tokenCount": "令牌数量",
+ "messageTokenCount": "输入令牌数量",
+ "tokenCount/hint": "消息中的令牌数量。使用当前选定模型的分词器计算。\n\n需要加载模型。",
+ "messageTokenCount/hint": "消息中的令牌数量。使用当前选定模型的分词器计算。\n\n**不包括**对文件附件中令牌的估计。",
"notes": "对话笔记",
"notes/add/first": "添加笔记",
- "notes/add/another": "再加一条笔记",
- "notes/hint": "保存此聊天的笔记。笔记仅供您参考,不会发送给模型。所有更改自动保存。",
+ "notes/add/another": "再添加一条笔记",
+ "notes/hint": "保存笔记到此聊天。笔记仅供您参考,不会发送给模型。所有更改会自动保存。",
"notes/placeholder": "在这里输入您的笔记...",
"notes/delete": "删除笔记",
"notes/noteLabel": "笔记",
@@ -40,96 +40,189 @@
"actions/sendMessage/error": "发送消息失败",
"actions/loadModel/error": "🥲 加载模型失败",
- "actions/addFile": "[实验性] 将文件附加到此消息\n(.pdf, 纯文本, 或 .docx)",
- "actions/changeRole": "在用户和助手角色之间切换。\n\n这对于引导对话朝特定方向发展非常有用。\n\n可用于构建‘少样本学习’或‘情境学习’场景",
+ "actions/addFile": "[实验性] 将文件附加到此消息\n(.pdf、纯文本或 .docx)",
+ "actions/changeRole": "在用户和助手角色之间切换。\n\n这有助于将对话引导到特定方向。\n\n可用于构建“少量学习”或“上下文学习”场景",
"actions/addImage": "添加图片",
"actions/deleteMessage": "删除消息",
- "actions/deleteMessage/confirmation": "您确定要删除这条消息吗?",
+ "actions/deleteMessage/confirmation": "您确定要删除此消息吗?",
"actions/copyMessage": "复制消息",
"actions/editMessage": "编辑消息",
- "actions/editMessage/cannotEditPreprocessed": "无法编辑预处理的消息,因为它们在运行预处理器后会被覆盖。要编辑消息,您可以:\n\n - 切换到原始消息并对其进行编辑,或者\n - 更改预处理器,使其产生所需的输出。",
+ "actions/editMessage/cannotEditPreprocessed": "无法编辑预处理的消息,因为它们会在运行预处理器后被覆盖。要编辑消息,请执行以下操作之一:\n\n - 切换到原始消息并编辑它,或者\n - 更改预处理器以生成所需的输出。",
"actions/regenerateMessage": "重新生成消息",
"actions/regenerateMessage/error": "重新生成消息失败",
- "actions/branchChat": "在此消息之后分支聊天",
+ "actions/branchChat": "在此消息后分支聊天",
"actions/branchChat/error": "分支聊天失败",
"actions/continueAssistantMessage": "继续助手消息",
"actions/continueAssistantMessage/error": "继续助手消息失败",
"actions/predictNext": "生成AI响应",
"actions/predictNext/error": "生成AI响应失败",
"actions/loadLastModel": "重新加载上次使用的模型",
- "actions/loadLastModel/tooltip": "点击以加载上次与该聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/loadLastModel/tooltip": "点击以加载上次与此聊天一起使用的模型:\n\n{{lastModel}}",
"actions/loadLastModel/error": "加载上次使用的模型失败。",
"actions/continueCurrentModel": "使用当前模型",
"actions/continueCurrentModel/tooltip": "当前模型:{{currentModel}}",
"actions/changeToLastUsedModel": "加载 {{lastModel}}",
"actions/changeToLastUsedModel/error": "切换到上次使用的模型失败。",
- "actions/changeToLastUsedModel/tooltip": "您上次在这个聊天中发送消息时使用了不同的模型。点击以卸载当前选定的模型({{currentModel}})并加载上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/changeToLastUsedModel/tooltip": "上次在此聊天中发送消息时使用了不同的模型。点击以卸载当前选定的模型 ({{currentModel}}) 并加载上次与此聊天一起使用的模型:\n\n{{lastModel}}",
"actions/switchToLastUsedModel": "切换到 {{lastModel}}",
- "actions/switchToLastUsedModel/tooltip": "点击以切换到上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/switchToLastUsedModel/tooltip": "点击以切换到上次与此聊天一起使用的模型:\n\n{{lastModel}}",
"actions/loadModel": "加载模型",
- "actions/toggleViewingProcessed/currentlyFalse": "当前查看的是原始消息。点击以查看预处理后的消息。",
- "actions/toggleViewingProcessed/currentlyTrue": "当前查看的是预处理后的消息。点击以查看原始消息。",
- "actions/toggleViewingProcessed/hint": "在消息发送给模型之前,它可能会被提示预处理器预处理。点击以切换查看原始消息和预处理后的消息。只有预处理后的消息会发送给模型。",
+ "actions/toggleViewingProcessed/currentlyFalse": "当前正在查看原始消息。点击以查看预处理后的消息。",
+ "actions/toggleViewingProcessed/currentlyTrue": "当前正在查看预处理后的消息。点击以查看原始消息。",
+ "actions/toggleViewingProcessed/hint": "在消息发送到模型之前,可能会由提示预处理器对其进行预处理。点击以在查看原始消息和预处理消息之间切换。只有预处理后的消息会被发送到模型。",
"editMessageConfirm/title": "保留更改?",
- "editMessageConfirm/message": "您已对消息进行了更改。您想要保留这些更改吗?",
+ "editMessageConfirm/message": "您已对该消息进行了更改。您想保留这些更改吗?",
"editMessageConfirm/keepEditing": "继续编辑",
"editMessageConfirm/save": "保存",
"editMessageConfirm/discard": "放弃更改",
- "tokenCount/totalNotAvailable": "词元(token):{{current}}",
- "tokenCount/totalAvailable": "词元(token):{{current}}/{{total}}",
+ "tokenCount/totalNotAvailable": "令牌:{{current}}",
+ "tokenCount/totalAvailable": "令牌:{{current}}/{{total}}",
"tokenCount/totalAvailablePercentage": "上下文已满 {{percentage}}%",
- "tokenCount/contextOverflow": "未经处理的上下文大于模型的最大词元(token)限制。根据您的上下文溢出策略,上下文可能会被截断,或者消息可能不会被发送。",
+ "tokenCount/contextOverflow": "未处理的上下文超出了模型的最大令牌限制。根据您的上下文溢出策略,上下文可能会被截断或消息可能无法发送。",
"modelLoader/manualLoadParams/label": "手动选择模型加载参数",
- "modelLoader/manualLoadParams/hint/before": "(或按住",
- "modelLoader/manualLoadParams/hint/after": ")",
+ "modelLoader/manualLoadParams/hint/before": "(或按住 ",
+ "modelLoader/manualLoadParams/hint/after": ")",
"actions/move/error": "移动失败",
"actions/rename/error": "重命名失败",
- "actions/createChatAtRoot": "新建聊天...",
+ "actions/createChatAtRoot": "新聊天...",
"actions/createChatAtRoot/error": "在根目录创建聊天失败",
- "actions/createFolderAtRoot": "新建文件夹...",
+ "actions/createFolderAtRoot": "新文件夹...",
"actions/createFolderAtRoot/error": "在根目录创建文件夹失败",
"actions/createChat/error": "创建聊天失败",
+ "actions/deleteChat/errorTitle": "删除聊天失败",
- "userFile/fileSizeLimit": "文件大小限制为",
- "userFile/noImageSupport": "模型不支持图片输入",
+ "userFile/fileSizeLimit": "文件大小限制为 ",
+ "userFile/noImageSupport": "模型不支持图像输入",
"userFile/errorPrefix": "错误 - ",
- "userFile/supportedImagePrefix": "不支持的图片类型 - 仅支持",
+ "userFile/supportedImagePrefix": "不支持的图像类型 - 仅支持 ",
"userFile/supportedImageSuffix": "。",
- "userFile/unsupportedFileType": "不支持的文件类型 - 仅支持图片、PDF 和 .txt 文件。",
+ "userFile/unsupportedFileType": "不支持的文件类型 - 仅支持图像、PDF 和 .txt 文件。",
"userFile/maxFilesPerMessage": "每条消息的最大文件数已达到。不能添加超过 {{files}} 个文件。",
- "userFile/maxFileSizePerMessage": "每条消息的最大文件大小已达到。不能添加超过 {{size}} 的文件。",
+ "userFile/maxFileSizePerMessage": "每条消息的最大文件大小已达到。不能添加大于 {{size}} 的文件。",
+ "userFile/maxFileSizePerConversation": "每次对话的最大文件大小已达到。不能添加大于 {{size}} 的文件。",
+ "userFile/failedToUploadError/title": "上传文件失败",
+ "userFile/failedToAddFile/title": "添加文件到聊天失败",
"errorTitle": "错误",
+ "userFile/chatTerminalDocumentsCount_one": "{{count}} 个文档在聊天中",
+ "userFile/chatTerminalDocumentsCount_other": "{{count}} 个文档在聊天中",
- "prediction/busyModel/title": "模型忙碌中",
- "prediction/busyModel/message": "请等待模型完成后再试",
+ "prediction/busyModel/title": "模型正忙",
+ "prediction/busyModel/message": "请等待模型完成后再试一次",
"prediction/noModel/title": "未选择模型",
"prediction/modelLoading": "消息已排队,将在模型加载完成后发送",
"prediction/noModel/message": "选择一个模型以发送消息",
"prediction/unloadModel/error": "卸载模型失败",
"retrieval/user/processingLabel": "AI 正在思考...",
- "retrieval/powerUser/intermediateStepsHidden": "中间步骤已隐藏。点击以展开。",
- "retrieval/actions/clickToExpand": "点击以展开中间步骤",
- "retrieval/actions/clickToCollapse": "点击以折叠中间步骤",
+ "retrieval/powerUser/intermediateStepsHidden": "中间步骤已隐藏。点击展开。",
+ "retrieval/actions/clickToExpand": "点击展开中间步骤",
+ "retrieval/actions/clickToCollapse": "点击折叠中间步骤",
- "style": "聊天外观",
+ "style": "外观",
"style/viewMode/markdown": "Markdown",
"style/viewMode/plaintext": "纯文本",
"style/viewMode/monospace": "等宽字体",
+ "speculativeDecodingVisualization/toggle": "可视化接受的草稿令牌",
+ "speculativeDecodingVisualization/fromDraftModel_one": "接受的草稿令牌",
+ "speculativeDecodingVisualization/fromDraftModel_other": "接受的草稿令牌",
+ "speculativeDecodingVisualization/cannotChangeViewMode": "在可视化草稿令牌时禁用视图模式选择。",
+
"style/fontSize/label": "字体大小",
"style/fontSize/medium": "默认",
"style/fontSize/large": "大",
"style/fontSize/small": "小",
- "topBarActions/duplicateChat": "复制聊天",
- "topBarActions/clearChat": "清除所有消息",
+ "style/debugBlocks/label": "显示调试信息块",
+
+ "style/thinkingUI/label": "默认展开推理块",
+ "style/chatFullWidth/label": "将聊天容器扩展到窗口宽度",
+
+ "messageBlocks": {
+ "expandBlockTooltip": "展开内容",
+ "collapseBlockTooltip": "折叠内容",
+ "debug": {
+ "label": "调试信息",
+ "collapseTooltip": "折叠调试信息块",
+ "expandTooltip": "展开调试信息块"
+ }
+ },
+
+ "topBarActions/duplicateChat": "复制",
+ "topBarActions/clearChat": "全部清除",
"topBarActions/clearChatConfirmation": "您确定要清除此聊天中的所有消息吗?",
"topBarActions/clearChatCancel": "取消",
"topBarActions/clearChatDelete": "全部清除",
- "noModels.indexing": "正在索引模型文件...(这可能需要一段时间)",
+ "noModels.indexing": "正在索引模型文件...(这可能需要一点时间)",
"noModels.downloading": "正在下载您的第一个LLM...",
- "noModels": "还没有LLM!下载一个开始吧!"
+ "noModels": "还没有LLM!下载一个开始吧!",
+
+ "plugins": {
+ "pluginTrigger": {
+ "noPlugins": "插件",
+ "multiplePlugins": "{{dynamicValue}} 个插件"
+ },
+ "pluginSelect": {
+ "title": "插件",
+ "dropdown": {
+ "configure": "配置",
+ "disable": "禁用",
+ "fork": "分支",
+ "uninstall": "卸载"
+ },
+ "actionButtons": {
+ "create": "+ 创建",
+ "import": "导入",
+ "discover": "发现"
+ },
+ "recentlyCreated": {
+ "title": "最近创建的插件",
+ "placeholder": "您创建的插件将出现在这里"
+ },
+ "startRunningDevelopmentPlugin/error": "启动开发模式下的插件失败",
+ "stopRunningDevelopmentPlugin/error": "停止开发模式下的插件失败"
+ },
+ "pluginConfiguration": {
+ "title": "插件配置",
+ "selectAPlugin": "选择一个插件以编辑其配置",
+ "preprocessorAndGenerator": "此插件包含自定义预处理器和生成器",
+ "generatorOnly": "此插件包含自定义生成器",
+ "preprocessorOnly": "此插件包含自定义预处理器"
+ },
+ "instructions": {
+ "runTheFollowing": "要运行您的插件,请打开终端并输入",
+ "pushTo": "通过推送到Hub与其他用户分享您的插件(可选)",
+ "createdSuccessfully": "插件创建成功",
+ "creatingPlugin": "正在创建插件...",
+ "projectFilesTitle": "项目文件",
+ "buttons": {
+ "documentation": "文档",
+ "dismiss": "关闭",
+ "publish": "发布",
+ "openInZed": "在 Zed 中打开",
+ "openInVscode": "在 VS Code 中打开",
+ "revealInFinder": "在 Finder 中显示",
+ "openInFileExplorer": "在文件资源管理器中打开"
+ }
+ }
+ },
+
+ "genInfo": {
+ "tokensPerSecond": "{{tokensPerSecond}} 个令牌/秒",
+ "predictedTokensCount": "{{predictedTokensCount}} 个令牌",
+ "timeToFirstTokenSec": "{{timeToFirstTokenSec}} 秒生成首个令牌",
+ "stopReason": "停止原因: {{stopReason}}",
+ "stopReason.userStopped": "用户停止",
+ "stopReason.modelUnloaded": "模型已卸载",
+ "stopReason.failed": "生成失败",
+ "stopReason.eosFound": "找到结束令牌 (EOS)",
+ "stopReason.stopStringFound": "找到停止字符串",
+ "stopReason.toolCalls": "工具调用",
+ "stopReason.maxPredictedTokensReached": "达到最大预测令牌数",
+ "stopReason.contextLengthReached": "达到上下文长度限制",
+ "speculativeDecodedBy": "草稿模型: {{decodedBy}}",
+ "speculativeDecodingStats": "接受 {{accepted}}/{{total}} 个草稿令牌 ({{percentage}}%)"
+ }
}
diff --git a/zh-CN/config.json b/zh-CN/config.json
index bbaf494e..3588db96 100644
--- a/zh-CN/config.json
+++ b/zh-CN/config.json
@@ -4,212 +4,325 @@
"showAdvancedSettings": "显示高级设置",
"showAll": "全部",
"basicSettings": "基础",
- "configSubtitle": "加载或保存预设并尝试模型参数覆盖",
+ "configSubtitle": "加载或保存预设并尝试覆盖模型参数",
"inferenceParameters/title": "预测参数",
"inferenceParameters/info": "尝试影响预测的参数。",
- "generalParameters/title": "通用",
+ "generalParameters/title": "常规",
"samplingParameters/title": "采样",
"basicTab": "基础",
"advancedTab": "高级",
"advancedTab/title": "🧪 高级配置",
- "advancedTab/expandAll": "展开所有",
+ "advancedTab/expandAll": "全部展开",
"advancedTab/overridesTitle": "配置覆盖",
- "advancedTab/noConfigsText": "您没有未保存的更改 - 编辑上方值以在此处查看覆盖。",
+ "advancedTab/noConfigsText": "您没有任何未保存的更改 - 编辑上方值以在此处查看覆盖。",
"loadInstanceFirst": "加载模型以查看可配置参数",
"noListedConfigs": "无可配置参数",
- "generationParameters/info": "尝试影响文本生成的基础参数。",
+ "generationParameters/info": "尝试影响文本生成的基本参数。",
"loadParameters/title": "加载参数",
- "loadParameters/description": "控制模型初始化和加载到内存的方式的设置。",
+ "loadParameters/description": "控制模型初始化和加载到内存中的方式的设置。",
"loadParameters/reload": "重新加载以应用更改",
+ "loadParameters/reload/error": "无法重新加载模型",
"discardChanges": "放弃更改",
"loadModelToSeeOptions": "加载模型以查看选项",
+ "schematicsError.title": "配置架构在以下字段中包含错误:",
+ "manifestSections": {
+ "structuredOutput/title": "结构化输出",
+ "speculativeDecoding/title": "推测解码",
+ "sampling/title": "采样",
+ "settings/title": "设置",
+ "toolUse/title": "工具使用",
+ "promptTemplate/title": "提示模板"
+ },
+
"llm.prediction.systemPrompt/title": "系统提示",
- "llm.prediction.systemPrompt/description": "使用此字段向模型提供背景指令,如一套规则、约束或一般要求。",
- "llm.prediction.systemPrompt/subTitle": "AI 指南",
+ "llm.prediction.systemPrompt/description": "使用此字段为模型提供背景说明,例如一组规则、约束或一般要求。",
+ "llm.prediction.systemPrompt/subTitle": "AI指南",
"llm.prediction.temperature/title": "温度",
- "llm.prediction.temperature/subTitle": "引入多少随机性。0 将始终产生相同的结果,而较高值将增加创造性和变化。",
- "llm.prediction.temperature/info": "来自 llama.cpp 帮助文档:\"默认值为 <{{dynamicValue}}>,它在随机性和确定性之间提供了平衡。极端情况下,温度为 0 会始终选择最可能的下一个词元(token),导致每次运行的输出相同\"",
+ "llm.prediction.temperature/subTitle": "引入多少随机性。0将每次产生相同的结果,而较高的值将增加创造性和差异性",
+    "llm.prediction.temperature/info": "来自llama.cpp帮助文档:\"默认值是 <{{dynamicValue}}>,它在随机性和确定性之间提供了平衡。极端情况下,温度为0将始终选择最可能的下一个令牌,导致每次运行的输出相同\"",
"llm.prediction.llama.sampling/title": "采样",
"llm.prediction.topKSampling/title": "Top K 采样",
- "llm.prediction.topKSampling/subTitle": "将下一个词元(token)限制为模型预测的前 k 个最可能的词元(token)。作用类似于温度",
- "llm.prediction.topKSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-k 采样是一种仅从模型预测的前 k 个最可能的词元(token)中选择下一个词元(token)的文本生成方法。\n\n它有助于减少生成低概率或无意义词元(token)的风险,但也可能限制输出的多样性。\n\n更高的 top-k 值(例如,100)将考虑更多词元(token),从而生成更多样化的文本,而较低的值(例如,10)将专注于最可能的词元(token),生成更保守的文本。\n\n• 默认值为 <{{dynamicValue}}>",
- "llm.prediction.llama.cpuThreads/title": "CPU 线程",
- "llm.prediction.llama.cpuThreads/subTitle": "推理期间使用的 CPU 线程数",
- "llm.prediction.llama.cpuThreads/info": "计算期间要使用的线程数。增加线程数并不总是与更好的性能相关联。默认值为 <{{dynamicValue}}>。",
+ "llm.prediction.topKSampling/subTitle": "将下一个令牌限制为前k个最可能的令牌之一。作用类似于温度",
+    "llm.prediction.topKSampling/info": "来自llama.cpp帮助文档:\n\nTop-k采样是一种文本生成方法,仅从模型预测的前k个最可能的令牌中选择下一个令牌。\n\n它有助于减少生成低概率或无意义令牌的风险,但也可能限制输出的多样性。\n\ntop-k的较高值(例如100)将考虑更多的令牌并导致更多样化的文本,而较低的值(例如10)将关注最可能的令牌并生成更保守的文本。\n\n• 默认值是 <{{dynamicValue}}>",
+ "llm.prediction.llama.cpuThreads/title": "CPU线程数",
+ "llm.prediction.llama.cpuThreads/subTitle": "推理期间使用的CPU线程数",
+    "llm.prediction.llama.cpuThreads/info": "计算期间使用的线程数。增加线程数并不总是与更好的性能相关。默认值是 <{{dynamicValue}}>。",
"llm.prediction.maxPredictedTokens/title": "限制响应长度",
- "llm.prediction.maxPredictedTokens/subTitle": "可选地限制 AI 响应的长度",
- "llm.prediction.maxPredictedTokens/info": "控制聊天机器人的响应最大长度。开启以设置响应的最大长度限制,或关闭以让聊天机器人决定何时停止。",
- "llm.prediction.maxPredictedTokens/inputLabel": "最大响应长度(词元(token))",
- "llm.prediction.maxPredictedTokens/wordEstimate": "约 {{maxWords}} 词",
+ "llm.prediction.maxPredictedTokens/subTitle": "可选地限制AI响应的长度",
+ "llm.prediction.maxPredictedTokens/info": "控制聊天机器人的响应最大长度。打开以设置响应的最大长度限制,或关闭以让聊天机器人决定何时停止。",
+ "llm.prediction.maxPredictedTokens/inputLabel": "最大响应长度(令牌)",
+ "llm.prediction.maxPredictedTokens/wordEstimate": "大约 {{maxWords}} 字",
"llm.prediction.repeatPenalty/title": "重复惩罚",
- "llm.prediction.repeatPenalty/subTitle": "多大程度上避免重复相同的词元(token)",
- "llm.prediction.repeatPenalty/info": "来自 llama.cpp 帮助文档:\"有助于防止模型生成重复或单调的文本。\n\n更高的值(例如,1.5)将更强烈地惩罚重复,而更低的值(例如,0.9)将更为宽容。\" • 默认值为 <{{dynamicValue}}>",
- "llm.prediction.minPSampling/title": "最小 P 采样",
- "llm.prediction.minPSampling/subTitle": "词元(token)被选为输出的最低基本概率",
- "llm.prediction.minPSampling/info": "来自 llama.cpp 帮助文档:\n\n相对于最可能词元(token)的概率,词元(token)被视为考虑的最低概率。必须在 [0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.repeatPenalty/subTitle": "对重复同一令牌的抑制程度",
+    "llm.prediction.repeatPenalty/info": "来自llama.cpp帮助文档:\"有助于防止模型生成重复或单调的文本。\n\n较高的值(例如1.5)将更强烈地惩罚重复,而较低的值(例如0.9)将更加宽容。\" • 默认值是 <{{dynamicValue}}>",
+ "llm.prediction.minPSampling/title": "Min P 采样",
+ "llm.prediction.minPSampling/subTitle": "选择输出令牌所需的最低基本概率",
+    "llm.prediction.minPSampling/info": "来自llama.cpp帮助文档:\n\n相对于最可能令牌的概率,考虑令牌的最低概率。必须在[0,1]范围内。\n\n• 默认值是 <{{dynamicValue}}>",
"llm.prediction.topPSampling/title": "Top P 采样",
- "llm.prediction.topPSampling/subTitle": "可能的下一个词元(token)的最小累积概率。作用类似于温度",
- "llm.prediction.topPSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-p 采样,也称为核心采样,是另一种文本生成方法,从累积概率至少为 p 的词元(token)子集中选择下一个词元(token)。\n\n这种方法通过同时考虑词元(token)的概率和要从中采样的词元(token)数量,在多样性和质量之间提供了平衡。\n\n更高的 top-p 值(例如,0.95)将导致更多样化的文本,而较低的值(例如,0.5)将生成更集中和保守的文本。必须在 (0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.topPSampling/subTitle": "可能的下一个令牌的最小累积概率。作用类似于温度",
+    "llm.prediction.topPSampling/info": "来自llama.cpp帮助文档:\n\nTop-p采样,也称为核采样,是另一种文本生成方法,从累积概率至少为p的子集令牌中选择下一个令牌。\n\n该方法通过同时考虑令牌的概率和要采样的令牌数量,在多样性和质量之间提供平衡。\n\ntop-p的较高值(例如0.95)将导致更多样化的文本,而较低的值(例如0.5)将生成更专注和保守的文本。必须在(0,1]范围内。\n\n• 默认值是 <{{dynamicValue}}>",
"llm.prediction.stopStrings/title": "停止字符串",
- "llm.prediction.stopStrings/subTitle": "应该停止模型生成更多词元(token)的字符串",
- "llm.prediction.stopStrings/info": "遇到特定字符串时将停止模型生成更多词元(token)",
- "llm.prediction.stopStrings/placeholder": "输入一个字符串并按 ⏎",
+ "llm.prediction.stopStrings/subTitle": "应停止模型生成更多令牌的字符串",
+ "llm.prediction.stopStrings/info": "当遇到特定字符串时将停止模型生成更多令牌",
+ "llm.prediction.stopStrings/placeholder": "输入字符串并按 ⏎",
"llm.prediction.contextOverflowPolicy/title": "上下文溢出",
- "llm.prediction.contextOverflowPolicy/subTitle": "当对话超出模型处理能力时,模型应该如何表现",
- "llm.prediction.contextOverflowPolicy/info": "决定当对话超过模型的工作内存('上下文')大小时该怎么做",
+ "llm.prediction.contextOverflowPolicy/subTitle": "当对话变得过大以至于模型无法处理时,模型应如何表现",
+ "llm.prediction.contextOverflowPolicy/info": "决定当对话超过模型工作内存('上下文')大小时应采取的措施",
"llm.prediction.llama.frequencyPenalty/title": "频率惩罚",
"llm.prediction.llama.presencePenalty/title": "存在惩罚",
"llm.prediction.llama.tailFreeSampling/title": "尾部自由采样",
"llm.prediction.llama.locallyTypicalSampling/title": "局部典型采样",
+ "llm.prediction.llama.xtcProbability/title": "XTC 采样概率",
+ "llm.prediction.llama.xtcProbability/subTitle": "每个生成的令牌激活XTC(排除顶级选择)采样的概率。XTC采样可以提高创造力并减少陈词滥调",
+ "llm.prediction.llama.xtcProbability/info": "每个生成的令牌激活XTC(排除顶级选择)采样的概率。XTC采样通常提高创造力并减少陈词滥调",
+ "llm.prediction.llama.xtcThreshold/title": "XTC 采样阈值",
+ "llm.prediction.llama.xtcThreshold/subTitle": "XTC(排除顶级选择)阈值。有`xtc-probability`的机会,搜索概率在`xtc-threshold`和0.5之间的令牌,并移除所有这样的令牌,除了概率最低的一个",
+ "llm.prediction.llama.xtcThreshold/info": "XTC(排除顶级选择)阈值。有`xtc-probability`的机会,搜索概率在`xtc-threshold`和0.5之间的令牌,并移除所有这样的令牌,除了概率最低的一个",
+ "llm.prediction.mlx.topKSampling/title": "Top K 采样",
+ "llm.prediction.mlx.topKSampling/subTitle": "将下一个令牌限制为前k个最可能的令牌之一。作用类似于温度",
+ "llm.prediction.mlx.topKSampling/info": "将下一个令牌限制为前k个最可能的令牌之一。作用类似于温度",
"llm.prediction.onnx.topKSampling/title": "Top K 采样",
- "llm.prediction.onnx.topKSampling/subTitle": "将下一个词元(token)限制为前 k 个最可能的词元(token)。作用类似于温度",
- "llm.prediction.onnx.topKSampling/info": "来自 ONNX 文档:\n\n保留最高概率词汇表词元(token)的数量以进行 top-k 过滤\n\n• 默认情况下此过滤器关闭",
+ "llm.prediction.onnx.topKSampling/subTitle": "将下一个令牌限制为前k个最可能的令牌之一。作用类似于温度",
+ "llm.prediction.onnx.topKSampling/info": "来自ONNX文档:\n\n用于top-k过滤的最高概率词汇表令牌的数量\n\n• 此过滤器默认关闭",
"llm.prediction.onnx.repeatPenalty/title": "重复惩罚",
- "llm.prediction.onnx.repeatPenalty/subTitle": "多大程度上避免重复相同的词元(token)",
- "llm.prediction.onnx.repeatPenalty/info": "更高的值阻止模型重复自身",
+ "llm.prediction.onnx.repeatPenalty/subTitle": "对重复同一令牌的抑制程度",
+ "llm.prediction.onnx.repeatPenalty/info": "较高的值会阻止模型重复自身",
"llm.prediction.onnx.topPSampling/title": "Top P 采样",
- "llm.prediction.onnx.topPSampling/subTitle": "可能的下一个词元(token)的最小累积概率。作用类似于温度",
- "llm.prediction.onnx.topPSampling/info": "来自 ONNX 文档:\n\n仅保留累积概率达到或超过 TopP 的最可能词元(token)用于生成\n\n• 默认情况下此过滤器关闭",
+ "llm.prediction.onnx.topPSampling/subTitle": "可能的下一个令牌的最小累积概率。作用类似于温度",
+ "llm.prediction.onnx.topPSampling/info": "来自ONNX文档:\n\n只有累积概率至少为TopP的最高概率令牌才会保留用于生成\n\n• 此过滤器默认关闭",
"llm.prediction.seed/title": "种子",
"llm.prediction.structured/title": "结构化输出",
"llm.prediction.structured/info": "结构化输出",
- "llm.prediction.structured/description": "高级:您可以提供一个 JSON 模式来强制模型输出特定格式。阅读[文档](https://lmstudio.ai/docs/advanced/structured-output)了解更多信息",
+ "llm.prediction.structured/description": "高级:您可以提供一个[JSON Schema](https://json-schema.org/learn/miscellaneous-examples)以强制模型输出特定格式。阅读[文档](https://lmstudio.ai/docs/advanced/structured-output)了解更多",
+ "llm.prediction.tools/title": "工具使用",
+ "llm.prediction.tools/description": "高级:您可以提供一个符合JSON的工具列表供模型请求调用。阅读[文档](https://lmstudio.ai/docs/advanced/tool-use)了解更多",
+ "llm.prediction.tools/serverPageDescriptionAddon": "通过请求体作为`tools`传递,当使用服务器API时",
"llm.prediction.promptTemplate/title": "提示模板",
- "llm.prediction.promptTemplate/subTitle": "聊天中消息发送给模型的格式。更改此设置可能会引入意外行为 - 确保您知道自己在做什么!",
+ "llm.prediction.promptTemplate/subTitle": "聊天消息发送到模型的格式。更改此内容可能会引入意外行为 - 确保您知道自己在做什么!",
+ "llm.prediction.speculativeDecoding.numDraftTokensExact/title": "生成的草稿令牌数",
+ "llm.prediction.speculativeDecoding.numDraftTokensExact/subTitle": "主模型每个令牌生成的草稿模型令牌数。找到计算与奖励之间的最佳点",
+ "llm.prediction.speculativeDecoding.minContinueDraftingProbability/title": "草稿概率截止",
+ "llm.prediction.speculativeDecoding.minContinueDraftingProbability/subTitle": "继续起草直到令牌概率低于此阈值。较高的值通常意味着风险较低,回报较低",
+ "llm.prediction.speculativeDecoding.minDraftLengthToConsider/title": "最小草稿大小",
+ "llm.prediction.speculativeDecoding.minDraftLengthToConsider/subTitle": "小于这个大小的草稿将被主模型忽略。较高的值通常意味着风险较低,回报较低",
+ "llm.prediction.speculativeDecoding.maxTokensToDraft/title": "最大草稿大小",
+ "llm.prediction.speculativeDecoding.maxTokensToDraft/subTitle": "草稿中允许的最大令牌数。如果所有令牌概率都大于截止值,则为上限。较低的值通常意味着风险较低,回报较低",
+ "llm.prediction.speculativeDecoding.draftModel/title": "草稿模型",
+ "llm.prediction.reasoning.parsing/title": "推理部分解析",
+ "llm.prediction.reasoning.parsing/subTitle": "如何解析模型输出中的推理部分",
"llm.load.contextLength/title": "上下文长度",
- "llm.load.contextLength/subTitle": "模型可以一次性关注的词元(token)最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
- "llm.load.contextLength/info": "指定模型一次可以考虑的最大词元(token)数量,影响其处理过程中保留的上下文量",
- "llm.load.contextLength/warning": "设置较高的上下文长度值会对内存使用产生显著影响",
+ "llm.load.contextLength/subTitle": "模型在一个提示中可以关注的最大令牌数。有关管理此功能的更多信息,请参阅“推理参数”下的对话溢出选项",
+ "llm.load.contextLength/info": "指定模型一次可以考虑的最大令牌数,影响其在处理过程中的上下文保留能力",
+ "llm.load.contextLength/warning": "为上下文长度设置高值会显著影响内存使用",
"llm.load.seed/title": "种子",
- "llm.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机",
- "llm.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
+ "llm.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1为随机",
+ "llm.load.seed/info": "随机种子:设置随机数生成的种子以确保结果可重现",
- "llm.load.llama.evalBatchSize/title": "评估批处理大小",
- "llm.load.llama.evalBatchSize/subTitle": "每次处理的输入词元(token)数量。增加此值会提高性能,但会增加内存使用量",
- "llm.load.llama.evalBatchSize/info": "设置评估期间一起处理的示例数量,影响速度和内存使用",
- "llm.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
- "llm.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
+ "llm.load.llama.evalBatchSize/title": "评估批次大小",
+ "llm.load.llama.evalBatchSize/subTitle": "一次处理的输入令牌数。增加此值会提高性能但会增加内存使用",
+ "llm.load.llama.evalBatchSize/info": "设置评估期间一批处理的示例数量,影响速度和内存使用",
+ "llm.load.llama.ropeFrequencyBase/title": "RoPE频率基",
+ "llm.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下实现更好的性能",
"llm.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
- "llm.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
- "llm.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
- "llm.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
- "llm.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
- "llm.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
- "llm.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
- "llm.load.llama.flashAttention/title": "闪电注意力",
- "llm.load.llama.flashAttention/subTitle": "降低某些模型的内存使用量和生成时间",
- "llm.load.llama.flashAttention/info": "加速注意力机制,实现更快、更高效的处理",
+ "llm.load.llama.ropeFrequencyScale/title": "RoPE频率比例",
+ "llm.load.llama.ropeFrequencyScale/subTitle": "上下文长度通过此因子缩放以使用RoPE扩展有效上下文",
+ "llm.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率比例以控制位置编码粒度",
+ "llm.load.llama.acceleration.offloadRatio/title": "GPU卸载",
+ "llm.load.llama.acceleration.offloadRatio/subTitle": "用于GPU加速的离散模型层数",
+ "llm.load.llama.acceleration.offloadRatio/info": "设置要卸载到GPU的层数。",
+ "llm.load.llama.flashAttention/title": "快速注意力",
+ "llm.load.llama.flashAttention/subTitle": "在某些模型上减少内存使用和生成时间",
+ "llm.load.llama.flashAttention/info": "加速注意力机制以实现更快和更高效的处理",
"llm.load.numExperts/title": "专家数量",
"llm.load.numExperts/subTitle": "模型中使用的专家数量",
"llm.load.numExperts/info": "模型中使用的专家数量",
"llm.load.llama.keepModelInMemory/title": "保持模型在内存中",
- "llm.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
- "llm.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
- "llm.load.llama.useFp16ForKVCache/title": "使用 FP16 用于 KV 缓存",
- "llm.load.llama.useFp16ForKVCache/info": "通过以半精度(FP16)存储缓存来减少内存使用",
- "llm.load.llama.tryMmap/title": "尝试 mmap()",
- "llm.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
+ "llm.load.llama.keepModelInMemory/subTitle": "即使卸载到GPU,也要为模型保留系统内存。提高性能但需要更多系统RAM",
+ "llm.load.llama.keepModelInMemory/info": "防止模型被换出到磁盘,确保更快访问,但代价是更高的RAM使用",
+ "llm.load.llama.useFp16ForKVCache/title": "使用FP16用于KV缓存",
+ "llm.load.llama.useFp16ForKVCache/info": "通过半精度(FP16)存储缓存来减少内存使用",
+ "llm.load.llama.tryMmap/title": "尝试mmap()",
+ "llm.load.llama.tryMmap/subTitle": "改进模型的加载时间。禁用此功能可能在模型大于可用系统RAM时提高性能",
"llm.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
+ "llm.load.llama.cpuThreadPoolSize/title": "CPU线程池大小",
+ "llm.load.llama.cpuThreadPoolSize/subTitle": "分配给用于模型计算的线程池的CPU线程数",
+    "llm.load.llama.cpuThreadPoolSize/info": "分配给用于模型计算的线程池的CPU线程数。增加线程数并不总是与更好的性能相关。默认值是 <{{dynamicValue}}>。",
+ "llm.load.llama.kCacheQuantizationType/title": "K缓存量化类型",
+ "llm.load.llama.kCacheQuantizationType/subTitle": "较低的值减少内存使用但可能降低质量。效果因模型而异。",
+ "llm.load.llama.vCacheQuantizationType/title": "V缓存量化类型",
+ "llm.load.llama.vCacheQuantizationType/subTitle": "较低的值减少内存使用但可能降低质量。效果因模型而异。",
+ "llm.load.llama.vCacheQuantizationType/turnedOnWarning": "⚠️ 如果未启用闪存注意力,必须禁用此值",
+ "llm.load.llama.vCacheQuantizationType/disabledMessage": "只有在启用闪存注意力时才能开启",
+ "llm.load.llama.vCacheQuantizationType/invalidF32MetalState": "⚠️ 使用F32时必须禁用闪存注意力",
+ "llm.load.mlx.kvCacheBits/title": "KV缓存量化",
+ "llm.load.mlx.kvCacheBits/subTitle": "KV缓存应量化的比特数",
+ "llm.load.mlx.kvCacheBits/info": "KV缓存应量化的比特数",
+ "llm.load.mlx.kvCacheBits/turnedOnWarning": "使用KV缓存量化时忽略上下文长度设置",
+ "llm.load.mlx.kvCacheGroupSize/title": "KV缓存量化:组大小",
+ "llm.load.mlx.kvCacheGroupSize/subTitle": "KV缓存量化的组大小。较高的组大小减少内存使用但可能降低质量",
+ "llm.load.mlx.kvCacheGroupSize/info": "KV缓存应量化的比特数",
+ "llm.load.mlx.kvCacheQuantizationStart/title": "KV缓存量化:ctx越过此长度时开始量化",
+ "llm.load.mlx.kvCacheQuantizationStart/subTitle": "开始量化KV缓存的上下文长度阈值",
+ "llm.load.mlx.kvCacheQuantizationStart/info": "开始量化KV缓存的上下文长度阈值",
+ "llm.load.mlx.kvCacheQuantization/title": "KV缓存量化",
+ "llm.load.mlx.kvCacheQuantization/subTitle": "量化模型的KV缓存。这可能导致更快的生成和更低的内存占用,\n但会牺牲模型输出的质量。",
+ "llm.load.mlx.kvCacheQuantization/bits/title": "KV缓存量化比特",
+ "llm.load.mlx.kvCacheQuantization/bits/tooltip": "量化KV缓存的比特数",
+ "llm.load.mlx.kvCacheQuantization/bits/bits": "比特",
+ "llm.load.mlx.kvCacheQuantization/groupSize/title": "组大小策略",
+ "llm.load.mlx.kvCacheQuantization/groupSize/accuracy": "准确",
+ "llm.load.mlx.kvCacheQuantization/groupSize/balanced": "平衡",
+ "llm.load.mlx.kvCacheQuantization/groupSize/speedy": "快速",
+ "llm.load.mlx.kvCacheQuantization/groupSize/tooltip": "高级:量化'matmul组大小'配置\n\n• 准确 = 组大小32\n• 平衡 = 组大小64\n• 快速 = 组大小128\n",
+ "llm.load.mlx.kvCacheQuantization/quantizedStart/title": "ctx达到此长度时开始量化",
+ "llm.load.mlx.kvCacheQuantization/quantizedStart/tooltip": "当上下文达到此数量的令牌时,\n开始量化KV缓存",
"embedding.load.contextLength/title": "上下文长度",
- "embedding.load.contextLength/subTitle": "模型可以一次性关注的词元(token)最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
- "embedding.load.contextLength/info": "指定模型一次可以考虑的最大词元(token)数量,影响其处理过程中保留的上下文量",
- "embedding.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
- "embedding.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
+ "embedding.load.contextLength/subTitle": "模型在一个提示中可以关注的最大令牌数。有关管理此功能的更多信息,请参阅“推理参数”下的对话溢出选项",
+ "embedding.load.contextLength/info": "指定模型一次可以考虑的最大令牌数,影响其在处理过程中的上下文保留能力",
+ "embedding.load.llama.ropeFrequencyBase/title": "RoPE频率基",
+ "embedding.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下实现更好的性能",
"embedding.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
- "embedding.load.llama.evalBatchSize/title": "评估批处理大小",
- "embedding.load.llama.evalBatchSize/subTitle": "每次处理的输入词元(token)数量。增加此值会提高性能,但会增加内存使用量",
- "embedding.load.llama.evalBatchSize/info": "设置评估期间一起处理的词元(token)数量",
- "embedding.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
- "embedding.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
- "embedding.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
- "embedding.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
- "embedding.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
- "embedding.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
+ "embedding.load.llama.evalBatchSize/title": "评估批次大小",
+ "embedding.load.llama.evalBatchSize/subTitle": "一次处理的输入令牌数。增加此值会提高性能但会增加内存使用",
+ "embedding.load.llama.evalBatchSize/info": "设置评估期间一批处理的令牌数量",
+ "embedding.load.llama.ropeFrequencyScale/title": "RoPE频率比例",
+ "embedding.load.llama.ropeFrequencyScale/subTitle": "上下文长度通过此因子缩放以使用RoPE扩展有效上下文",
+ "embedding.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率比例以控制位置编码粒度",
+ "embedding.load.llama.acceleration.offloadRatio/title": "GPU卸载",
+ "embedding.load.llama.acceleration.offloadRatio/subTitle": "用于GPU加速的离散模型层数",
+ "embedding.load.llama.acceleration.offloadRatio/info": "设置要卸载到GPU的层数。",
"embedding.load.llama.keepModelInMemory/title": "保持模型在内存中",
- "embedding.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
- "embedding.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
- "embedding.load.llama.tryMmap/title": "尝试 mmap()",
- "embedding.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
+ "embedding.load.llama.keepModelInMemory/subTitle": "即使卸载到GPU,也要为模型保留系统内存。提高性能但需要更多系统RAM",
+ "embedding.load.llama.keepModelInMemory/info": "防止模型被换出到磁盘,确保更快访问,但代价是更高的RAM使用",
+ "embedding.load.llama.tryMmap/title": "尝试mmap()",
+ "embedding.load.llama.tryMmap/subTitle": "改进模型的加载时间。禁用此功能可能在模型大于可用系统RAM时提高性能",
"embedding.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
"embedding.load.seed/title": "种子",
- "embedding.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机种子",
+ "embedding.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1为随机种子",
- "embedding.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
+ "embedding.load.seed/info": "随机种子:设置随机数生成的种子以确保结果可重现",
"presetTooltip": {
"included/title": "预设值",
- "included/description": "以下字段将会被应用",
- "included/empty": "在此上下文中,此预设没有适用的字段。",
- "included/conflict": "您将被要求选择是否应用此值",
+ "included/description": "将应用以下字段",
+ "included/empty": "此上下文中不适用任何预设字段。",
+ "included/conflict": "将询问您是否应用此值",
"separateLoad/title": "加载时配置",
- "separateLoad/description.1": "预设还包含以下加载时配置。加载时配置是全模型范围的,并且需要重新加载模型才能生效。按住",
- "separateLoad/description.2": "应用到",
+ "separateLoad/description.1": "预设还包括以下加载时配置。加载时配置是模型范围的,需要重新加载模型才能生效。按住",
+ "separateLoad/description.2": "以应用于",
"separateLoad/description.3": "。",
"excluded/title": "可能不适用",
"excluded/description": "以下字段包含在预设中,但在当前上下文中不适用。",
"legacy/title": "旧版预设",
- "legacy/description": "这是一个旧版预设。它包括以下字段,这些字段现在要么自动处理,要么不再适用。"
+ "legacy/description": "这是一个旧版预设。它包括以下字段,这些字段现在自动处理或不再适用。",
+ "button/publish": "发布到Hub",
+ "button/pushUpdate": "推送更改到Hub",
+ "button/export": "导出"
},
"customInputs": {
"string": {
- "emptyParagraph": "<空>"
+        "emptyParagraph": "<空>"
},
"checkboxNumeric": {
"off": "关闭"
},
+ "llamaCacheQuantizationType": {
+ "off": "关闭"
+ },
+ "mlxKvCacheBits": {
+ "off": "关闭"
+ },
"stringArray": {
- "empty": "<空>"
+        "empty": "<空>"
},
"llmPromptTemplate": {
"type": "类型",
"types.jinja/label": "模板 (Jinja)",
- "jinja.bosToken/label": "开始词元 (BOS Token)",
- "jinja.eosToken/label": "结束词元 (EOS Token)",
+ "jinja.bosToken/label": "BOS令牌",
+ "jinja.eosToken/label": "EOS令牌",
"jinja.template/label": "模板",
- "jinja/error": "解析 Jinja 模板失败: {{error}}",
- "jinja/empty": "请在上方输入一个 Jinja 模板。",
- "jinja/unlikelyToWork": "您提供的 Jinja 模板很可能无法正常工作,因为它没有引用变量 \"messages\"。请检查您输入的模板是否正确。",
+ "jinja/error": "无法解析Jinja模板:{{error}}",
+ "jinja/empty": "请在上面输入Jinja模板。",
+ "jinja/unlikelyToWork": "您在上面提供的Jinja模板不太可能工作,因为它没有引用变量“messages”。请仔细检查是否输入了正确的模板。",
"types.manual/label": "手动",
- "manual.subfield.beforeSystem/label": "系统前缀",
+ "manual.subfield.beforeSystem/label": "系统之前",
"manual.subfield.beforeSystem/placeholder": "输入系统前缀...",
- "manual.subfield.afterSystem/label": "系统后缀",
+ "manual.subfield.afterSystem/label": "系统之后",
"manual.subfield.afterSystem/placeholder": "输入系统后缀...",
- "manual.subfield.beforeUser/label": "用户前缀",
+ "manual.subfield.beforeUser/label": "用户之前",
"manual.subfield.beforeUser/placeholder": "输入用户前缀...",
- "manual.subfield.afterUser/label": "用户后缀",
+ "manual.subfield.afterUser/label": "用户之后",
"manual.subfield.afterUser/placeholder": "输入用户后缀...",
- "manual.subfield.beforeAssistant/label": "助手前缀",
+ "manual.subfield.beforeAssistant/label": "助手之前",
"manual.subfield.beforeAssistant/placeholder": "输入助手前缀...",
- "manual.subfield.afterAssistant/label": "助手后缀",
+ "manual.subfield.afterAssistant/label": "助手之后",
"manual.subfield.afterAssistant/placeholder": "输入助手后缀...",
- "stopStrings/label": "额外停止字符串",
- "stopStrings/subTitle": "除了用户指定的停止字符串之外,还将使用特定于模板的停止字符串。"
+ "stopStrings/label": "附加停止字符串",
+ "stopStrings/subTitle": "模板特定的停止字符串,将与用户指定的停止字符串一起使用。"
},
"contextLength": {
- "maxValueTooltip": "这是模型训练所能处理的最大词元(token)数量。点击以将上下文设置为此值",
+ "maxValueTooltip": "这是模型训练所能处理的最大令牌数。点击以将上下文设置为此值",
"maxValueTextStart": "模型支持最多",
- "maxValueTextEnd": "个词元(token)",
- "tooltipHint": "尽管模型可能支持一定数量的词元(token),但如果您的机器资源无法处理负载,性能可能会下降 - 增加此值时请谨慎"
+ "maxValueTextEnd": "令牌",
+ "tooltipHint": "虽然模型可能支持多达一定数量的令牌,但如果您的机器资源无法承受负载,性能可能会下降 - 增加此值时请谨慎"
},
"contextOverflowPolicy": {
"stopAtLimit": "到达限制时停止",
- "stopAtLimitSub": "一旦模型的内存满载即停止生成",
- "truncateMiddle": "截断中间",
- "truncateMiddleSub": "从对话中间移除消息以为新消息腾出空间。模型仍然会记住对话的开头",
+ "stopAtLimitSub": "一旦模型的内存满了就停止生成",
+ "truncateMiddle": "中间截断",
+ "truncateMiddleSub": "从中部删除消息以为较新的消息腾出空间。模型仍会记住对话的开头",
"rollingWindow": "滚动窗口",
- "rollingWindowSub": "模型将始终接收最近的几条消息,但可能会忘记对话的开头"
+ "rollingWindowSub": "模型将始终获取最近的几条消息,但可能会忘记对话的开头"
},
"llamaAccelerationOffloadRatio": {
"max": "最大",
"off": "关闭"
+ },
+ "llamaAccelerationSplitStrategy": {
+ "evenly": "均匀",
+ "favorMainGpu": "偏向主GPU"
+ },
+ "speculativeDecodingDraftModel": {
+ "readMore": "阅读其工作原理",
+ "placeholder": "选择兼容的草稿模型",
+ "noCompatible": "未找到与当前模型选择兼容的草稿模型",
+ "stillLoading": "正在识别兼容的草稿模型...",
+ "notCompatible": "所选草稿模型( )与当前模型选择( )不兼容。",
+ "off": "关闭",
+ "loadModelToSeeOptions": "加载模型 以查看兼容选项",
+ "compatibleWithNumberOfModels": "推荐用于至少{{dynamicValue}}个您的模型",
+ "recommendedForSomeModels": "推荐用于某些模型",
+ "recommendedForLlamaModels": "推荐用于Llama模型",
+ "recommendedForQwenModels": "推荐用于Qwen模型",
+ "onboardingModal": {
+ "introducing": "介绍",
+ "speculativeDecoding": "推测解码",
+ "firstStepBody": "适用于 llama.cpp 和 MLX 模型的推理加速",
+ "secondStepTitle": "推测解码的推理加速",
+ "secondStepBody": "推测解码是一种涉及两个模型协作的技术:\n - 较大的“主”模型\n - 较小的“草稿”模型\n\n在生成过程中,草稿模型快速提出令牌供较大的主模型验证。验证令牌比实际生成它们快得多,这是速度提升的来源。**通常,主模型和草稿模型之间的尺寸差异越大,加速越明显**。\n\n为了保持质量,主模型只接受与其自身生成一致的令牌,从而在较快的推理速度下保持较大模型的响应质量。两个模型必须共享相同的词汇表。",
+ "draftModelRecommendationsTitle": "草稿模型建议",
+ "basedOnCurrentModels": "基于您当前的模型",
+ "close": "关闭",
+ "next": "下一步",
+ "done": "完成"
+ },
+ "speculativeDecodingLoadModelToSeeOptions": "请先加载模型 ",
+ "errorEngineNotSupported": "推测解码需要{{engineName}}引擎的版本至少为{{minVersion}}。请更新引擎( )并重新加载模型以使用此功能。",
+ "errorEngineNotSupported/noKey": "推测解码需要{{engineName}}引擎的版本至少为{{minVersion}}。请更新引擎并重新加载模型以使用此功能。"
+ },
+ "llmReasoningParsing": {
+ "startString/label": "起始字符串",
+ "startString/placeholder": "输入起始字符串...",
+ "endString/label": "结束字符串",
+ "endString/placeholder": "输入结束字符串..."
}
},
"saveConflictResolution": {
"title": "选择要包含在预设中的值",
- "description": "挑选并选择要保留的值",
+ "description": "选择要保留的值",
"instructions": "点击一个值以包含它",
"userValues": "先前值",
"presetValues": "新值",
@@ -218,52 +331,181 @@
},
"applyConflictResolution": {
"title": "保留哪些值?",
- "description": "您有未提交的更改与即将应用的预设有重叠",
+ "description": "您有未提交的更改,与即将应用的预设重叠",
"instructions": "点击一个值以保留它",
"userValues": "当前值",
- "presetValues": "即将应用的预设值",
+ "presetValues": "即将应用的预设值",
"confirm": "确认",
"cancel": "取消"
},
- "empty": "<空>",
+ "empty": " ",
+ "noModelSelected": "未选择模型",
+ "apiIdentifier.label": "API标识符",
+ "apiIdentifier.hint": "可选地为此模型提供一个标识符。这将在API请求中使用。留空以使用默认标识符。",
+ "idleTTL.label": "空闲自动卸载 (TTL)",
+ "idleTTL.hint": "如果设置,模型将在空闲指定时间后自动卸载。",
+ "idleTTL.mins": "分钟",
+
"presets": {
"title": "预设",
"commitChanges": "提交更改",
- "commitChanges/description": "将您的更改提交给预设。",
- "commitChanges.manual": "检测到新的字段。您将能够选择要包含在预设中的更改。",
+ "commitChanges/description": "将您的更改提交到预设。",
+ "commitChanges.manual": "检测到新字段。您将能够选择要包含在预设中的更改。",
"commitChanges.manual.hold.0": "按住",
- "commitChanges.manual.hold.1": "选择要提交给预设的更改。",
+ "commitChanges.manual.hold.1": "以选择要提交到预设的更改。",
"commitChanges.saveAll.hold.0": "按住",
- "commitChanges.saveAll.hold.1": "保存所有更改。",
+ "commitChanges.saveAll.hold.1": "以保存所有更改。",
"commitChanges.saveInPreset.hold.0": "按住",
- "commitChanges.saveInPreset.hold.1": "仅保存已经包含在预设中的字段的更改。",
- "commitChanges/error": "未能将更改提交给预设。",
+ "commitChanges.saveInPreset.hold.1": "以仅保存已包含在预设中的字段的更改。",
+ "commitChanges/error": "无法提交更改到预设。",
"commitChanges.manual/description": "选择要包含在预设中的更改。",
- "saveAs": "另存为新预设...",
- "presetNamePlaceholder": "为预设输入一个名称...",
- "cannotCommitChangesLegacy": "这是一个旧版预设,无法修改。您可以使用“另存为新预设...”创建一个副本。",
- "cannotCommitChangesNoChanges": "没有更改可以提交。",
- "emptyNoUnsaved": "选择一个预设...",
- "emptyWithUnsaved": "未保存的预设",
+ "saveAs": "另存为新...",
+ "presetNamePlaceholder": "输入预设名称...",
+ "cannotCommitChangesLegacy": "这是一个旧版预设,无法修改。您可以通过使用“另存为新...”创建副本。",
+ "cannotCommitChangesNoChanges": "没有要提交的更改。",
+ "emptyNoUnsaved": "选择预设...",
+ "emptyWithUnsaved": "未保存预设",
"saveEmptyWithUnsaved": "保存预设为...",
"saveConfirm": "保存",
"saveCancel": "取消",
- "saving": "正在保存...",
- "save/error": "未能保存预设。",
+ "saving": "保存中...",
+ "save/error": "无法保存预设。",
"deselect": "取消选择预设",
- "deselect/error": "取消选择预设失败。",
- "select/error": "选择预设失败。",
- "delete/error": "删除预设失败。",
- "discardChanges": "丢弃未保存的更改",
- "discardChanges/info": "丢弃所有未提交的更改并恢复预设至原始状态",
- "newEmptyPreset": "创建新的空预设...",
- "contextMenuSelect": "选择预设",
- "contextMenuDelete": "删除"
+ "deselect/error": "无法取消选择预设。",
+ "select/error": "无法选择预设。",
+ "delete/error": "无法删除预设。",
+ "discardChanges": "放弃未保存",
+ "discardChanges/info": "放弃所有未提交的更改并将预设恢复到原始状态",
+ "newEmptyPreset": "+ 新建预设",
+ "importPreset": "导入",
+ "contextMenuSelect": "应用预设",
+ "contextMenuDelete": "删除...",
+ "contextMenuShare": "发布...",
+ "contextMenuOpenInHub": "在Hub上查看",
+ "contextMenuPushChanges": "推送更改到Hub",
+ "contextMenuPushingChanges": "推送中...",
+ "contextMenuPushedChanges": "更改已推送",
+ "contextMenuExport": "导出文件",
+ "contextMenuRevealInExplorer": "在文件资源管理器中显示",
+ "contextMenuRevealInFinder": "在Finder中显示",
+ "share": {
+ "title": "发布预设",
+ "action": "分享您的预设,供他人下载、点赞和派生",
+ "presetOwnerLabel": "拥有者",
+ "uploadAs": "您的预设将作为{{name}}创建",
+ "presetNameLabel": "预设名称",
+ "descriptionLabel": "描述(可选)",
+ "loading": "发布中...",
+ "success": "预设成功推送",
+ "presetIsLive": " 现已在Hub上线!",
+ "close": "关闭",
+ "confirmViewOnWeb": "在网页上查看",
+ "confirmCopy": "复制URL",
+ "confirmCopied": "已复制!",
+ "pushedToHub": "您的预设已推送到Hub",
+ "descriptionPlaceholder": "输入描述...",
+ "willBePublic": "发布您的预设将使其公开",
+ "publicSubtitle": "您的预设是 公开 的。其他人可以在lmstudio.ai上下载和派生它",
+ "confirmShareButton": "发布",
+ "error": "无法发布预设",
+ "createFreeAccount": "在Hub上创建免费账户以发布预设"
+ },
+ "update": {
+ "title": "推送更改到Hub",
+ "title/success": "预设成功更新",
+ "subtitle": "对 进行更改并将其推送到Hub",
+ "descriptionLabel": "描述",
+ "descriptionPlaceholder": "输入描述...",
+ "loading": "推送中...",
+ "cancel": "取消",
+ "createFreeAccount": "在Hub上创建免费账户以发布预设",
+ "error": "推送更新失败",
+ "confirmUpdateButton": "推送"
+ },
+ "import": {
+ "title": "从文件导入预设",
+ "dragPrompt": "拖放预设JSON文件或 从计算机中选择 ",
+ "remove": "移除",
+ "cancel": "取消",
+ "importPreset_zero": "导入预设",
+ "importPreset_one": "导入预设",
+ "importPreset_other": "导入 {{count}} 个预设",
+ "selectDialog": {
+ "title": "选择预设文件 (.json)",
+ "button": "导入"
+ },
+ "error": "无法导入预设",
+ "resultsModal": {
+ "titleSuccessSection_one": "成功导入1个预设",
+ "titleSuccessSection_other": "成功导入{{count}}个预设",
+ "titleFailSection_zero": "",
+ "titleFailSection_one": "({{count}} 失败)",
+ "titleFailSection_other": "({{count}} 失败)",
+ "titleAllFailed": "无法导入预设",
+ "importMore": "导入更多",
+ "close": "完成",
+ "successBadge": "成功",
+ "alreadyExistsBadge": "预设已存在",
+ "errorBadge": "错误",
+ "invalidFileBadge": "无效文件",
+ "otherErrorBadge": "无法导入预设",
+ "errorViewDetailsButton": "查看详情",
+ "seeError": "查看错误",
+ "noName": "无预设名称",
+ "useInChat": "在聊天中使用"
+ },
+ "importFromUrl": {
+ "button": "从URL导入...",
+ "title": "从URL导入",
+ "back": "从文件导入...",
+ "action": "在下方粘贴要导入的预设的LM Studio Hub URL",
+ "invalidUrl": "无效URL。请确保您粘贴的是正确的LM Studio Hub URL。",
+ "tip": "您可以直接使用LM Studio Hub中的{{buttonName}}按钮安装预设",
+ "confirm": "导入",
+ "cancel": "取消",
+ "loading": "正在导入...",
+ "error": "无法下载预设。"
+ }
+ },
+ "download": {
+ "title": "从LM Studio Hub拉取",
+ "subtitle": "将保存到您的预设中。这样做后,您可以在应用中使用此预设",
+ "button": "拉取",
+ "button/loading": "正在拉取...",
+ "cancel": "取消",
+ "error": "无法下载预设。"
+ },
+ "inclusiveness": {
+ "speculativeDecoding": "包含在预设中"
+ }
},
-
- "flashAttentionWarning": "闪电注意力是一项实验性功能,可能会导致某些模型出现问题。如果您遇到问题,请尝试禁用它。",
+
+ "flashAttentionWarning": "Flash Attention 是一项实验性功能,可能会导致某些模型出现问题。如果遇到问题,请尝试禁用它。",
+ "llamaKvCacheQuantizationWarning": "KV缓存量化是一项实验性功能,可能会导致某些模型出现问题。启用V缓存量化时必须启用Flash Attention。如果遇到问题,请重置为默认的“F16”。",
"seedUncheckedHint": "随机种子",
"ropeFrequencyBaseUncheckedHint": "自动",
- "ropeFrequencyScaleUncheckedHint": "自动"
+ "ropeFrequencyScaleUncheckedHint": "自动",
+
+ "hardware": {
+ "advancedGpuSettings": "高级GPU设置",
+ "advancedGpuSettings.info": "如果不确定,请将这些值保留为默认值",
+ "advancedGpuSettings.reset": "重置为默认值",
+ "environmentVariables": {
+ "title": "环境变量",
+ "description": "模型生命周期内的活动环境变量。",
+ "key.placeholder": "选择变量...",
+ "value.placeholder": "值"
+ },
+ "mainGpu": {
+ "title": "主GPU",
+ "description": "优先用于模型计算的GPU。",
+ "placeholder": "选择主GPU..."
+ },
+ "splitStrategy": {
+ "title": "拆分策略",
+ "description": "如何在多个GPU之间拆分模型计算。",
+ "placeholder": "选择拆分策略..."
+ }
+ }
}
diff --git a/zh-CN/developer.json b/zh-CN/developer.json
index 71bd762c..81aec289 100644
--- a/zh-CN/developer.json
+++ b/zh-CN/developer.json
@@ -1,68 +1,169 @@
{
"tabs/server": "本地服务器",
- "tabs/extensions": "LM 运行环境",
+ "tabs/extensions": "LM运行环境",
"loadSettings/title": "加载设置",
- "modelSettings/placeholder": "选择一个模型进行配置",
-
- "loadedModels/noModels": "没有已加载的模型",
-
+ "modelSettings/placeholder": "选择一个模型以进行配置",
+
+ "loadedModels/noModels": "没有加载的模型",
+
"serverOptions/title": "服务器选项",
"serverOptions/configurableTitle": "可配置选项",
- "serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio 使用端口 1234。如果该端口已被占用,您可能需要更改此设置。",
+ "serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio使用1234端口。如果该端口已被占用,则可能需要更改此设置。",
"serverOptions/port/subtitle": "监听的端口",
"serverOptions/autostart/title": "自动启动服务器",
- "serverOptions/autostart/hint": "当加载模型时自动启动本地服务器",
+ "serverOptions/autostart/hint": "在应用程序或服务启动时自动开启LM Studio的本地LLM服务器。",
"serverOptions/port/integerWarning": "端口号必须是整数",
- "serverOptions/port/invalidPortWarning": "端口号必须介于 1 到 65535 之间",
- "serverOptions/cors/title": "启用 CORS",
- "serverOptions/cors/hint1": "启用 CORS (跨源资源共享) 允许您访问的网站向 LM Studio 服务器发起请求。",
- "serverOptions/cors/hint2": "当从网页或 VS Code / 其他扩展发起请求时,可能需要启用 CORS。",
- "serverOptions/cors/subtitle": "允许跨源请求",
- "serverOptions/network/title": "在网络中提供服务",
- "serverOptions/network/subtitle": "向网络中的设备开放服务器",
+ "serverOptions/port/invalidPortWarning": "端口必须在1到65535之间",
+ "serverOptions/cors/title": "启用CORS",
+ "serverOptions/cors/hint1": "启用CORS(跨域资源共享)将允许您访问的网站向LM Studio服务器发送请求。",
+ "serverOptions/cors/hint2": "当从网页或VS Code/其他扩展发出请求时,可能需要启用CORS。",
+ "serverOptions/cors/subtitle": "允许跨域请求",
+ "serverOptions/network/title": "在局域网中提供服务",
+ "serverOptions/network/subtitle": "将服务器暴露给网络中的其他设备",
"serverOptions/network/hint1": "是否允许来自网络中其他设备的连接。",
- "serverOptions/network/hint2": "如果未选中,服务器将仅监听本地主机。",
+ "serverOptions/network/hint2": "如果不勾选,服务器将仅监听localhost。",
"serverOptions/verboseLogging/title": "详细日志记录",
"serverOptions/verboseLogging/subtitle": "为本地服务器启用详细日志记录",
"serverOptions/contentLogging/title": "记录提示和响应",
- "serverOptions/contentLogging/subtitle": "本地请求/响应日志记录设置",
+ "serverOptions/contentLogging/subtitle": "本地请求/响应日志设置",
"serverOptions/contentLogging/hint": "是否在本地服务器日志文件中记录提示和/或响应。",
+ "serverOptions/fileLoggingMode/title": "文件日志模式",
+ "serverOptions/fileLoggingMode/off/title": "关闭",
+ "serverOptions/fileLoggingMode/off/hint": "不创建日志文件",
+ "serverOptions/fileLoggingMode/succinct/title": "简洁",
+ "serverOptions/fileLoggingMode/succinct/hint": "记录与控制台相同的内容。长请求将被截断。",
+ "serverOptions/fileLoggingMode/full/title": "完整",
+ "serverOptions/fileLoggingMode/full/hint": "不截断长请求。",
"serverOptions/jitModelLoading/title": "即时模型加载",
- "serverOptions/jitModelLoading/hint": "启用后,如果请求指定了一个未加载的模型,该模型将自动加载并使用。此外,\"/v1/models\" 端点还将包含尚未加载的模型。",
- "serverOptions/loadModel/error": "加载模型失败",
-
+ "serverOptions/jitModelLoading/hint": "启用后,如果请求指定了未加载的模型,它将自动加载并使用。此外,“/v1/models”端点还将包含尚未加载的模型。",
+ "serverOptions/loadModel/error": "无法加载模型",
+ "serverOptions/jitModelLoadingTTL/title": "自动卸载未使用的JIT加载模型",
+ "serverOptions/jitModelLoadingTTL/hint": "通过即时加载(JIT)加载的模型,在一段时间(TTL)内未被使用后将被自动卸载。",
+ "serverOptions/jitModelLoadingTTL/ttl/label": "最大空闲TTL",
+ "serverOptions/jitModelLoadingTTL/ttl/unit": "分钟",
+ "serverOptions/unloadPreviousJITModelOnLoad/title": "仅保留最后的JIT加载模型",
+ "serverOptions/unloadPreviousJITModelOnLoad/hint": "确保在任何给定时间最多只有一个通过JIT加载的模型(卸载之前的模型)。",
+
"serverLogs/scrollToBottom": "跳转到底部",
"serverLogs/clearLogs": "清除日志 ({{shortcut}})",
"serverLogs/openLogsFolder": "打开服务器日志文件夹",
-
+
"runtimeSettings/title": "运行环境设置",
- "runtimeSettings/chooseRuntime/title": "配置运行环境",
- "runtimeSettings/chooseRuntime/description": "为每个模型格式选择一个运行环境",
+ "runtimeSettings/chooseRuntime/title": "默认选择",
+ "runtimeSettings/chooseRuntime/description": "为每种模型格式选择默认的运行环境",
"runtimeSettings/chooseRuntime/showAllVersions/label": "显示所有运行环境",
- "runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio 只显示每个兼容运行环境的最新版本。启用此选项可以查看所有可用的运行环境。",
+ "runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio仅显示每个兼容运行环境的最新版本。启用此选项可以查看所有可用的运行环境。",
"runtimeSettings/chooseRuntime/select/placeholder": "选择一个运行环境",
-
+
"runtimeOptions/uninstall": "卸载",
"runtimeOptions/uninstallDialog/title": "卸载 {{runtimeName}}?",
- "runtimeOptions/uninstallDialog/body": "卸载此运行环境将从系统中移除它。此操作不可逆。",
- "runtimeOptions/uninstallDialog/body/caveats": "某些文件可能需要在重启 LM Studio 后才能被移除。",
- "runtimeOptions/uninstallDialog/error": "卸载运行环境失败",
+ "runtimeOptions/uninstallDialog/body": "卸载此运行环境会将其从系统中移除。此操作不可逆。",
+ "runtimeOptions/uninstallDialog/body/caveats": "某些文件可能只有在LM Studio重新启动后才会被删除。",
+ "runtimeOptions/uninstallDialog/error": "无法卸载运行环境",
"runtimeOptions/uninstallDialog/confirm": "继续并卸载",
"runtimeOptions/uninstallDialog/cancel": "取消",
"runtimeOptions/noCompatibleRuntimes": "未找到兼容的运行环境",
- "runtimeOptions/downloadIncompatibleRuntime": "此运行环境被认为与您的机器不兼容。它很可能无法正常工作。",
+ "runtimeOptions/downloadIncompatibleRuntime": "此运行环境被确定为与您的机器不兼容。它很可能无法正常工作。",
"runtimeOptions/noRuntimes": "未找到运行环境",
-
- "inferenceParams/noParams": "此模型类型没有可配置的推理参数",
-
- "endpoints/openaiCompatRest/title": "支持的端点 (类似 OpenAI 的)",
- "endpoints/openaiCompatRest/getModels": "列出当前已加载的模型",
- "endpoints/openaiCompatRest/postCompletions": "文本补全模式。给定一个提示,预测下一个词元(token)。注意:OpenAI 认为此端点已'弃用'。",
- "endpoints/openaiCompatRest/postChatCompletions": "聊天补全。向模型发送聊天历史以预测下一个助手响应",
+
+ "runtimes": {
+ "manageLMRuntimes": "管理LM运行环境",
+ "includeOlderRuntimeVersions": "包括旧版运行环境",
+ "dismiss": "关闭",
+ "updateAvailableToast": {
+ "title": "LM运行环境更新可用!"
+ },
+ "updatedToast": {
+ "title": " ✅ LM运行环境已更新:{{runtime}} → v{{version}}",
+ "preferencesUpdated": "新加载的{{compatibilityTypes}}模型将使用更新后的运行环境。"
+ },
+ "noAvx2ErrorMessage": "所有LM运行环境当前都需要支持AVX2的CPU",
+ "downloadableRuntimes": {
+ "runtimeExtensionPacks": "运行环境扩展包",
+ "refresh": "刷新",
+ "refreshing": "正在刷新...",
+ "filterSegment": {
+ "compatibleOnly": "仅兼容",
+ "all": "全部"
+ },
+ "card": {
+ "releaseNotes": "发行说明",
+ "latestVersionInstalled": "已安装最新版本",
+ "updateAvailable": "有可用更新"
+ }
+ },
+ "installedRuntimes": {
+ "manage": {
+ "title": "管理活动运行环境"
+ },
+ "dropdownOptions": {
+ "installedVersions": "管理版本",
+ "close": "关闭"
+ },
+ "tabs": {
+ "all": "全部",
+ "frameworks": "我的框架",
+ "engines": "我的引擎"
+ },
+ "detailsModal": {
+ "installedVersions": "{{runtimeName}} 的已安装版本",
+ "manifestJsonTitle": "清单JSON(高级)",
+ "releaseNotesTitle": "发行说明",
+ "noReleaseNotes": "此版本没有可用的发行说明",
+ "back": "返回",
+ "close": "关闭"
+ },
+ "noEngines": "未安装引擎",
+ "noFrameworks": "未安装框架"
+ }
+ },
+
+ "inferenceParams/noParams": "此模型类型没有可用的可配置推理参数",
+
+ "quickDocs": {
+ "tabChipTitle": "快速文档",
+ "newToolUsePopover": "代码片段现在可以在“快速文档”中使用。点击这里开始使用工具!",
+ "newToolUsePopoverTitle": "📚 快速文档",
+ "learnMore": "ℹ️ 👾 要了解更多关于LM Studio本地服务器端点的信息,请访问[文档](https://lmstudio.ai/docs)。",
+ "helloWorld": {
+ "title": "你好,世界!"
+ },
+ "chat": {
+ "title": "聊天"
+ },
+ "structuredOutput": {
+ "title": "结构化输出"
+ },
+ "imageInput": {
+ "title": "图像输入"
+ },
+ "embeddings": {
+ "title": "嵌入"
+ },
+ "toolUse": {
+ "title": "工具使用",
+ "tab": {
+ "saveAsPythonFile": "保存为Python文件",
+ "runTheScript": "运行脚本:",
+ "savePythonFileCopyPaste": "保存为Python文件以便复制粘贴命令"
+ }
+ },
+ "newBadge": "新"
+ },
+
+ "endpoints/openaiCompatRest/title": "支持的端点(类似OpenAI)",
+ "endpoints/openaiCompatRest/getModels": "列出当前加载的模型",
+ "endpoints/openaiCompatRest/postCompletions": "文本补全模式。根据提示预测下一个令牌。注意:OpenAI认为此端点已‘弃用’。",
+ "endpoints/openaiCompatRest/postChatCompletions": "聊天补全。向模型发送聊天历史以预测下一个助手回复",
"endpoints/openaiCompatRest/postEmbeddings": "文本嵌入。为给定的文本输入生成文本嵌入。接受字符串或字符串数组。",
+
+ "model.createVirtualModelFromInstance": "将设置保存为新的虚拟模型",
+ "model.createVirtualModelFromInstance/error": "无法将设置保存为新的虚拟模型",
+
+ "model": {
+ "toolUseSectionTitle": "工具使用",
+ "toolUseDescription": "检测到此模型已经过工具使用的训练\n\n打开快速文档获取更多信息"
+ },
- "model.createVirtualModelFromInstance": "另存为新的虚拟模型",
- "model.createVirtualModelFromInstance/error": "另存为新的虚拟模型失败",
-
- "apiConfigOptions/title": "API 配置"
+ "apiConfigOptions/title": "API配置"
}
diff --git a/zh-CN/discover.json b/zh-CN/discover.json
index 26200bfe..9abf90d7 100644
--- a/zh-CN/discover.json
+++ b/zh-CN/discover.json
@@ -1,26 +1,26 @@
{
"collectionsColumn": "集合",
- "collectionsColumn/collectionError": "加载集合详情时出错,请尝试上方的刷新按钮",
+ "collectionsColumn/collectionError": "加载集合详情时出错,请尝试刷新",
"bookmarksColumn": "书签",
- "searchBar/placeholder": "在 Hugging Face 上搜索模型...",
- "searchBar/huggingFaceError": "从 Hugging Face 获取结果时出现错误,请稍后再试",
- "sortBy": "排序依据",
+ "searchBar/placeholder": "在Hugging Face上搜索模型...",
+ "searchBar/huggingFaceError": "从Hugging Face获取结果时出错,请稍后再试",
+ "sortBy": "排序方式",
"searchSortKey.default/title": "最佳匹配",
"searchSortKey.likes/title": "最多点赞",
"searchSortKey.downloads/title": "最多下载",
"searchSortKey.lastModified/title": "最近更新",
"searchSortKey.createdAt/title": "最近创建",
- "download.option.willFitEstimation.caveat": "可能存在其他因素阻止其加载,例如模型架构、模型文件完整性或计算机上可用的内存量。",
- "download.option.willFitEstimation.fullGPUOffload/title": "完全 GPU 加载可能",
- "download.option.willFitEstimation.fullGPUOffload/description": "此模型可能完全适合您的 GPU 内存。这可能会显著加快推理速度。",
- "download.option.willFitEstimation.partialGPUOffload/title": "部分 GPU 加载可能",
- "download.option.willFitEstimation.partialGPUOffload/description": "此模型可能部分适合您的 GPU 内存。这通常会显著加快推理速度。",
+ "download.option.willFitEstimation.caveat": "可能还有其他因素会阻止其加载,例如模型的架构、模型文件完整性或计算机上可用的内存数量。",
+ "download.option.willFitEstimation.fullGPUOffload/title": "完全GPU卸载可能",
+ "download.option.willFitEstimation.fullGPUOffload/description": "该模型可能完全适合您的GPU内存。这可以显著加快推理速度。",
+ "download.option.willFitEstimation.partialGPUOffload/title": "部分GPU卸载可能",
+ "download.option.willFitEstimation.partialGPUOffload/description": "该模型可能部分适合您的GPU内存。这通常可以显著加快推理速度。",
"download.option.willFitEstimation.fitWithoutGPU/title": "可能适合",
- "download.option.willFitEstimation.fitWithoutGPU/description": "此模型可能适合您的机器内存。",
- "download.option.willFitEstimation.willNotFit/title": "对于此机器可能过大",
- "download.option.willFitEstimation.willNotFit/description": "成功使用此模型文件所需的内存可能超过您机器上的可用资源。下载此文件不推荐。",
+ "download.option.willFitEstimation.fitWithoutGPU/description": "该模型很可能适合您机器的内存。",
+ "download.option.willFitEstimation.willNotFit/title": "对本机来说可能过大",
+ "download.option.willFitEstimation.willNotFit/description": "成功使用此模型文件所需的内存可能超出您机器上的可用资源。不建议下载此文件。",
"download.option.recommended/title": "推荐",
- "download.option.recommended/description": "基于您的硬件,此选项被推荐。",
+ "download.option.recommended/description": "基于您的硬件,此选项是推荐的。",
"download.option.downloaded/title": "已下载",
"download.option.downloading/title": "正在下载 ({{progressPercentile}}%)",
diff --git a/zh-CN/download.json b/zh-CN/download.json
index 8f36aaad..e984da72 100644
--- a/zh-CN/download.json
+++ b/zh-CN/download.json
@@ -1,23 +1,23 @@
{
- "postDownloadActionExecutor.zipExtraction/status": "解压中...",
- "finalizing": "完成下载...(这可能需要几分钟)",
+ "postDownloadActionExecutor.zipExtraction/status": "正在解压...",
+ "finalizing": "正在完成下载...(这可能需要一些时间)",
"noOptions": "没有可用的兼容下载选项",
-
- "deeplink/confirmation/title": "从 Hugging Face 下载模型 🤗",
+
+ "deeplink/confirmation/title": "从 Hugging Face 🤗 下载模型",
"deeplink/confirmation/subtitle": "{{modelName}}",
"deeplink/confirmation/selectRecommended": "选择推荐项",
"deeplink/confirmation/selectOption": "选择下载选项",
- "deeplink/confirmation/recommendedOption": "对大多数用户来说可能是最佳选项",
+ "deeplink/confirmation/recommendedOption": "可能是大多数用户的最佳选择",
"deeplink/confirmation/downloadButton": "下载",
"deeplink/confirmation/nevermindButton": "算了",
- "deeplink/confirmation/modelPresent/title": "找到 Hugging Face 模型 ✅",
- "deeplink/confirmation/modelPresent/body": "好消息!此模型文件已经在您的本地机器上可用。",
+ "deeplink/confirmation/modelPresent/title": "已找到 Hugging Face 模型 ✅",
+ "deeplink/confirmation/modelPresent/body": "好消息!该模型文件已经在您的本地机器上可用。",
"deeplink/confirmation/loadInChat": "在新聊天中加载 {{ modelName }}",
- "deeplink/error/modelNotFound/title": "哎呀,我们未能找到此模型",
- "deeplink/error/modelNotFound/body": "请再次检查模型名称,并考虑尝试不同的下载选项。",
+ "deeplink/error/modelNotFound/title": "哎呀,我们无法找到该模型",
+ "deeplink/error/modelNotFound/body": "请仔细检查模型名称,并考虑尝试其他下载选项。",
"deeplink/actions/trySearching": "尝试在 Hugging Face 上搜索 {{modelName}}",
"downloadsPanel/title": "下载",
- "downloadsPanel/sectionTitle/ongoing": "正在进行",
+ "downloadsPanel/sectionTitle/ongoing": "进行中",
"downloadsPanel/sectionTitle/completed": "已完成"
}
diff --git a/zh-CN/models.json b/zh-CN/models.json
index 66d373f9..5c4efaf6 100644
--- a/zh-CN/models.json
+++ b/zh-CN/models.json
@@ -3,83 +3,91 @@
"filterModels.placeholder": "筛选模型...",
"aggregate_one": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
"aggregate_other": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
-
- "noModels.title": "您的本地 LLM 将显示在这里。",
+
+ "noModels.title": "您的本地模型将显示在这里。",
"noModels.discoverButtonText.prefix": "点击左侧边栏的",
- "noModels.discoverButtonText.suffix": "按钮来发现有趣的 LLM 下载。",
- "noModels.discoverModelsPrompt": "去探索一些本地 LLM 吧!",
-
+ "noModels.discoverButtonText.suffix": "按钮,发现并下载有趣的LLM模型。",
+ "noModels.discoverModelsPrompt": "去探索一些本地LLM模型吧!",
+
"modelsTable.arch/label": "架构",
"modelsTable.params/label": "参数",
"modelsTable.publisher/label": "发布者",
- "modelsTable.llms/label": "LLM",
- "modelsTable.embeddingModels/label": "嵌入模型",
- "modelsTable.quant/label": "量化",
+ "modelsTable.displayName/label": "名称",
+ "modelsTable.modelKey/label": "模型键",
"modelsTable.size/label": "大小",
"modelsTable.dateModified/label": "修改日期",
"modelsTable.actions/label": "操作",
-
+
+ "modelsTable.quant/label": "量化",
+ "modelsTable.llms/label": "大语言模型",
+ "modelsTable.embeddingModels/label": "嵌入模型",
+
"action.model.delete": "删除",
"action.model.delete.full": "删除模型",
"action.model.delete.confirmation/title": "删除 {{name}}",
- "action.model.delete.confirmation/description": "您确定吗?这将永久删除与此模型相关的所有文件。此操作不可逆。",
+ "action.model.delete.confirmation/description": "确定吗?这将永久删除与该模型相关的所有文件。此操作不可逆。",
"action.model.delete.confirmation/confirm": "删除",
-
+
"action.createVirtual": "创建预设",
- "action.createVirtual.details/title": "创建预设",
+ "action.createVirtual.details/title": "创建一个预设",
"action.createVirtual.details/create": "创建",
"action.createVirtual.details/cancel": "取消",
"action.createVirtual.details.base/label": "基础模型",
"action.createVirtual.details.name/label": "名称",
- "action.createVirtual.details.includeMachineDependent/label": "包含依赖于机器的配置",
- "action.createVirtual.details.includeMachineDependent/hint": "是否在预设中包含依赖于机器的配置(如 GPU 设置)。不建议用于分享。",
+ "action.createVirtual.details.includeMachineDependent/label": "包含机器相关配置",
+ "action.createVirtual.details.includeMachineDependent/hint": "是否在预设中包含机器相关配置(例如GPU设置)。不建议用于共享。",
"action.createVirtual.details.config/label": "配置覆盖",
"action.createVirtual.details.config.empty": "无配置覆盖",
- "action.createVirtual.details/error": "创建虚拟模型失败。",
-
+ "action.createVirtual.details/error": "无法创建虚拟模型。",
+
"loader.model.bundled": "捆绑",
"action.cancel": "取消",
"indexingOngoing": "正在索引模型... 这可能需要几秒钟",
- "index/error_one": "索引以下文件夹失败:",
- "index/error_other": "索引以下文件夹失败:",
- "badModels/title_one": "索引以下模型失败:",
- "badModels/title_other": "索引以下模型失败:",
- "badModels.virtualModelIncorrectPlacement": "虚拟模型放置错误。预期位置为 {{expected}}。实际位置为 {{actual}}。",
- "badModels.virtualModelBadManifest": "无效的虚拟模型清单 (model.yaml):",
- "unresolvedVirtualModels/title_one": "解析以下虚拟模型失败:",
- "unresolvedVirtualModels/title_other": "解析以下虚拟模型失败:",
+ "index/error_one": "未能索引以下文件夹:",
+ "index/error_other": "未能索引以下文件夹:",
+ "badModels/title_one": "未能索引以下模型:",
+ "badModels/title_other": "未能索引以下模型:",
+ "badModels.virtualModelIncorrectPlacement": "虚拟模型位置不正确。期望位置为 {{expected}}。实际位置为 {{actual}}。",
+ "badModels.virtualModelBadManifest": "无效的虚拟模型清单(model.yaml):",
+ "unresolvedVirtualModels/title_one": "未能解析以下虚拟模型:",
+ "unresolvedVirtualModels/title_other": "未能解析以下虚拟模型:",
"unresolvedVirtualModels.missingModel": "缺少依赖模型:{{missing}}。依赖路径:\n{{chain}}",
"unresolvedVirtualModels.circular": "检测到循环依赖。",
-
+
"modelsDirectory": "模型目录",
"modelsDirectory.change": "更改...",
"modelsDirectory.reset": "重置为默认路径",
- "modelsDirectory.reveal.mac": "在 Finder 中显示",
+ "modelsDirectory.reveal.mac": "在Finder中显示",
"modelsDirectory.reveal.nonMac": "在文件资源管理器中打开",
"modelsDirectory.forceReindex": "刷新",
"loadState/loaded": "已加载",
- "loadState/loading": "加载中",
+ "loadState/loading": "正在加载",
"loadState/unloaded": "未加载",
- "loadState/unloading": "卸载中",
+ "loadState/unloading": "正在卸载",
"loadState/idle": "空闲",
- "pinned": "此模型已被固定。右键点击取消固定。",
- "lastUsed": "最后使用",
+ "pinned": "此模型已被固定。右键单击以取消固定。",
+ "lastUsed": "上次使用",
"contextMenu/pin": "固定到顶部",
"contextMenu/unpin": "取消固定",
"contextMenu/copyAbsolutePath": "复制绝对路径",
"contextMenu/copyModelName": "复制模型路径",
- "contextMenu/openOnHuggingFace": "在 Hugging Face 上打开",
+ "contextMenu/copyModelDefaultIdentifier": "复制默认标识符",
+ "contextMenu/showRawMetadata": "显示原始元数据",
+ "contextMenu/openOnHuggingFace": "在Hugging Face上打开",
"tooltip/moreActions": "更多操作",
"tooltip/getInfo": "获取信息",
"tooltip/editModelDefaultConfig": "编辑模型默认配置",
- "tooltip/editModelDefaultConfig/override": "编辑模型默认配置 (* 当前有覆盖",
- "tooltip/visionBadge": "此模型能够处理图像输入",
-
- "visionBadge/label": "视觉功能启用",
-
+ "tooltip/editModelDefaultConfig/override": "编辑模型默认配置(* 有覆盖)",
+ "tooltip/visionBadge": "此模型可以处理图像输入",
+ "tooltip/toolUseBadge": "此模型经过工具使用训练",
+
+ "visionBadge/label": "视觉支持",
+ "toolUseBadge/label": "工具使用训练",
+
"loader.action.load": "加载模型",
"loader.action.clearChanges": "清除更改",
"loader.action.cancel": "取消",
- "loader.info.clickOnModelToLoad": "点击模型以加载",
- "loader.info.configureLoadParameters": "配置模型加载参数"
+ "loader.info.clickOnModelToLoad": "点击模型以加载它",
+ "loader.info.configureLoadParameters": "配置模型加载参数",
+ "loader.info.activeGeneratorWarning": "您正在使用带有自定义生成器的插件。根据生成器的实现,当前加载的模型可能会、也可能不会应用于此插件。"
}
diff --git a/zh-CN/onboarding.json b/zh-CN/onboarding.json
index 5e8202f8..44925e91 100644
--- a/zh-CN/onboarding.json
+++ b/zh-CN/onboarding.json
@@ -1,24 +1,42 @@
{
"action.skipOnboarding": "跳过引导",
"action.next": "下一步",
- "action.back": "上一步",
+ "action.back": "返回",
"action.finish": "完成",
-
+
"dismissable_rag_modal": {
- "description": "现在您可以使用检索增强生成 (RAG) 与自己的文档进行聊天。以下是其工作原理:",
+ "description": "您现在可以使用检索增强生成(RAG)与自己的文档进行对话。以下是其工作原理:",
"instructions": {
"attach_files": {
- "title": "上传文件",
- "description": "一次最多可上传5个文件,总大小不超过30MB。支持的格式包括PDF、DOCX、TXT和CSV。"
+ "title": "附加文件",
+ "description": "一次最多上传5个文件,总大小不得超过30MB。支持的格式包括PDF、DOCX、TXT和CSV。"
},
"be_specific": {
- "title": "具体明确",
- "description": "提问时,尽可能多地提及细节。这有助于系统从您的文档中检索最相关的信息。"
+ "title": "具体说明",
+ "description": "在提问时,请尽可能提供详细信息。这有助于系统从您的文档中检索最相关的信息。"
},
"get_responses": {
- "title": "获取回应并实验",
- "description": "大语言模型将查看您的查询和从文档中检索到的摘录,并尝试生成回应。通过尝试不同的查询来找到最佳方法。"
+ "title": "获取响应并进行实验",
+ "description": "大语言模型将查看您的查询以及从文档中检索到的摘录,并尝试生成响应。尝试不同的查询以找到最佳效果。"
}
}
+ },
+
+ "toolUse": {
+ "step_0": {
+ "title": "测试版:工具使用 🛠️(函数调用)",
+ "text_0": "一些模型(例如Llama 3.1/3.2、Mistral、Qwen等)经过训练可以使用工具。",
+ "text_1": "这意味着:您需要以特定格式向大语言模型提供一组“工具”(函数签名),然后模型可以根据用户的提示决定是否“调用”这些工具。",
+ "text_2": "您可以想象的应用场景包括查询API、运行代码,或者任何可以通过函数调用表达的操作。"
+ },
+ "step_1": {
+ "title": "开始使用工具",
+ "toolUseCanWorkWithAnyModel": "经过工具使用训练的模型表现会优于其他模型,但您可以尝试对任何模型使用工具。阅读文档以了解更多。\n经过工具使用训练的模型将标有一个新徽章:",
+ "hasCompatibleModel": "🎉 看起来您已经拥有支持工具使用的模型!",
+ "downloadRecommendedModel": "下载经过工具使用训练的模型:"
+ },
+ "nextButton": "下一步",
+ "letsGoButton": "加载模型并启动服务器",
+ "doneButton": "关闭"
}
}
diff --git a/zh-CN/settings.json b/zh-CN/settings.json
index 9e3717b9..4285ca15 100644
--- a/zh-CN/settings.json
+++ b/zh-CN/settings.json
@@ -3,8 +3,8 @@
"settingsDialogButtonTooltip": "应用设置",
"settingsNewButtonPopover": {
- "primary": "应用设置现已移至右下角",
- "secondary": "点击⚙️按钮以打开它们。",
+ "primary": "应用设置现在位于右下角",
+ "secondary": "点击 ⚙️ 按钮打开它们。",
"tertiary": "或者按"
},
"appUpdate": "应用更新",
@@ -14,72 +14,87 @@
"newUpdateAvailable": "LM Studio 的新版本可用!🎉",
"newBetaUpdateAvailable": "LM Studio 的新测试版可用!🛠️🎉",
"downloadingInProgress": "正在下载更新...",
- "downloadUpdate": "更新至 LM Studio {{version}}",
- "downloadBetaUpdate": "更新至 LM Studio 测试版 {{version}} (构建号 {{build}})",
+ "downloadUpdate": "更新到 LM Studio {{version}}",
+ "downloadBetaUpdate": "更新到 LM Studio 测试版 {{version}}(构建 {{build}})",
"downloadCompleted": "下载完成!",
- "updateDownloadComplete": "更新下载成功!",
+ "updateDownloadComplete": "LM Studio 更新已准备好",
"updateDownloadFailed": "更新失败!",
"hasFinishedDownloading": "已下载完毕。",
- "yourCurrentVersion": "您当前的版本为:",
- "latestVersion": "最新版本为:",
+ "yourCurrentVersion": "您当前的版本是:",
+ "latestVersion": "最新版本是:",
"downloadLabel": "立即更新",
+ "downloadLabel/Linux": "下载更新",
"cancelDownloadLabel": "取消",
- "downloadingUpdate": "正在下载更新...",
- "updateDownloaded": "新更新已成功下载。重启应用以应用更新。",
- "restartAppToUpdate": "重新启动应用以应用更新",
- "appUpdatedToastTitle": "已更新至 {{title}}",
- "appUpdatedToastDescriptionPrefix": "查看",
+ "downloadingUpdate": "正在下载 {{item}}...",
+ "updateDownloaded": "应用程序需要重启以应用更新",
+ "restartAppToUpdate": "重新启动应用程序以应用更新",
+ "appUpdatedToastTitle": "已更新到 {{title}}",
+ "appUpdatedToastDescriptionPrefix": "查看 ",
"AppUpdatedToastDescriptionReleaseNotes": "发行说明",
- "doItLater": "稍后再说",
- "failedToUpdate": "应用更新失败。请检查您的网络连接或稍后再试。",
+ "toolUseToastTitle": "测试版新增功能:工具使用和函数调用 API",
+ "toolUseToastDescription": "可直接替代 OpenAI 工具使用,支持 Llama 3.1/3.2、Mistral 和 Qwen 等模型。",
+ "toolUseToastButtonText": "前往开发者页面试用",
+ "doItLater": "我稍后处理",
+ "failedToUpdate": "应用程序更新失败。请检查您的网络连接或稍后重试。",
"retryInBackground": "后台重试",
"laterLabel": "稍后",
"releaseNotesLabel": "发行说明",
"remindMeLater": "稍后提醒我",
"failedDownloadUpdate": "下载更新失败",
"installAndRelaunch": "安装并重新启动",
- "uptodate": "您的应用已是最新版本!当前版本为 {{version}}",
+ "uptodate": "您已是最新版本!当前版本为 {{version}}",
"preferences": "偏好设置",
- "general": "常规",
+ "general": "通用",
"sideButtonLabels": "显示侧边按钮标签",
+ "showModelFileNames": "我的模型:始终显示完整的模型文件名",
"colorThemeLabel": "颜色主题",
"complexityLevelLabel": "用户界面复杂度级别",
- "selectComplexityLevelPlaceholder": "选择默认的UI复杂度级别",
+ "selectComplexityLevelPlaceholder": "选择默认的 UI 复杂度级别",
"userComplexityLevelLabel": "普通用户",
"powerUserComplexityLevelLabel": "高级用户",
"developerComplexityLevelLabel": "开发者",
"chatSettingsLabel": "聊天设置",
- "chat/alwaysShowPromptTemplate": "始终在聊天侧栏显示提示模板",
- "chat/highlightChatMessageOnHover": "鼠标悬停时高亮显示聊天消息",
- "chat/doubleClickMessageToEdit": "双击聊天消息以编辑",
+ "chat/alwaysShowPromptTemplate": "始终在聊天侧边栏中显示提示模板",
+ "chat/highlightChatMessageOnHover": "悬停时高亮聊天消息",
+ "chat/doubleClickMessageToEdit": "双击聊天消息进行编辑",
- "chat/keyboardShortcuts/label": "键盘快捷键",
+ "chat/aiNaming/label": "聊天 AI 命名",
+ "chat/aiNaming/mode/label": "AI 生成的聊天名称",
+ "chat/aiNaming/mode/value/never": "从不",
+ "chat/aiNaming/mode/value/never/subTitle": "不创建 AI 生成的聊天名称",
+ "chat/aiNaming/mode/value/auto": "自动",
+ "chat/aiNaming/mode/value/auto/subTitle": "根据生成速度决定是否创建名称",
+ "chat/aiNaming/mode/value/always": "总是",
+ "chat/aiNaming/mode/value/always/subTitle": "无论生成速度如何,都创建 AI 生成的聊天名称",
+ "chat/aiNaming/emoji": "在 AI 生成的聊天名称中使用表情符号",
+ "chat/keyboardShortcuts/label": "快捷键",
"chat/keyboardShortcuts/verbPrefix": "使用",
"chat/keyboardShortcuts/regenerate": "重新生成聊天中的最后一条消息",
"chat/keyboardShortcuts/sendMessage": "发送消息",
- "onboarding/blockTitle": "入门提示",
- "onboarding/dismissedHints": "已关闭的入门提示",
- "onboarding/resetHintTooltip": "点击以重新启用此入门提示",
- "onboarding/resetAllHints": "重置所有入门提示",
- "onboarding/noneDismissed": "没有已关闭的提示,目前所有入门帮助提示都会出现直至下次关闭",
+ "onboarding/blockTitle": "引导提示",
+ "onboarding/dismissedHints": "已忽略的引导提示",
+ "onboarding/resetHintTooltip": "点击以重新启用此引导提示",
+ "onboarding/resetAllHints": "重置所有引导提示",
+ "onboarding/noneDismissed": "没有忽略的提示,目前所有引导提示都会显示,直到下次被忽略",
"firstTimeExperienceLabel": "首次聊天体验",
"firstTimeExperienceMarkCompletedLabel": "标记为已完成",
"firstTimeExperienceResetLabel": "重置",
- "showPromptSuggestionsLabel": "创建新聊天时显示提示建议",
+ "showPromptSuggestionsLabel": "在创建新聊天时显示提示建议",
"darkThemeLabel": "深色",
"lightThemeLabel": "浅色",
"systemThemeLabel": "自动",
- "sepiaThemeLabel": "护眼",
- "unloadPreviousModelLabel": "选择要加载的模型时,先卸载任何当前已加载的模型",
+ "sepiaThemeLabel": "棕褐色",
+ "unloadPreviousModelLabel": "选择要加载的模型时,先卸载当前已加载的模型",
"languageLabel": "语言",
- "changeLanguageLabel": "选择应用语言(仍在开发中)",
+ "changeLanguageLabel": "选择应用程序语言(仍在开发中)",
"developerLabel": "开发者",
+ "localServiceLabel": "本地 LLM 服务(无头模式)",
"showExperimentalFeaturesLabel": "显示实验性功能",
- "appFirstLoadLabel": "应用首次加载体验",
+ "appFirstLoadLabel": "应用程序首次加载体验",
"showDebugInfoBlocksInChatLabel": "在聊天中显示调试信息块",
- "autoLoadBundledLLMLabel": "启动时自动加载捆绑的大语言模型",
+ "autoLoadBundledLLMLabel": "启动时自动加载捆绑的 LLM 模型",
"showReleaseNotes": "显示发行说明",
"hideReleaseNotes": "隐藏发行说明",
@@ -91,45 +106,54 @@
"backendDownloadChannel.value.beta": "测试版",
"backendDownloadChannel.value.latest": "开发版",
"backendDownloadChannel.shortLabel": "运行环境下载通道",
- "backendDownloadChannel.hint": "选择从哪个通道下载 LM Studio 扩展包。\"{{stableName}}\" 是推荐给大多数用户的通道。",
+ "backendDownloadChannel.hint": "选择下载 LM Studio 扩展包的通道。“{{stableName}}” 是推荐给大多数用户的通道。",
- "appUpdateChannel.label": "LM Studio 更新通道",
+ "appUpdateChannel.label": "更新通道",
"appUpdateChannel.value.stable": "稳定版",
"appUpdateChannel.value.beta": "测试版",
- "appUpdateChannel.shortLabel": "应用更新通道",
- "appUpdateChannel.hint": "选择从哪个通道接收 LM Studio 应用更新。\"{{stableName}}\" 是推荐给大多数用户的通道。",
+ "appUpdateChannel.value.alpha": "Alpha",
+ "appUpdateChannel.shortLabel": "应用程序更新通道",
+ "appUpdateChannel.hint": "选择接收 LM Studio 应用程序更新的通道。“{{stableName}}” 是推荐给大多数用户的通道。",
- "modelLoadingGuardrails.label": "模型加载保护",
- "modelLoadingGuardrails.description": "超出系统资源限制加载模型可能导致系统不稳定或冻结。保护措施可以防止意外过载。如果需要,可以在这里调整这些限制,但请注意,接近系统极限加载模型可能会降低稳定性。",
+ "modelLoadingGuardrails.label": "模型加载护栏",
+ "modelLoadingGuardrails.description": "加载超出系统资源限制的模型可能会导致系统不稳定或冻结。护栏可以防止意外过载。如有必要,请在此处调整这些限制,但请注意,加载接近系统限制的模型可能会降低稳定性。",
"modelLoadingGuardrails.value.off": "关闭(不推荐)",
"modelLoadingGuardrails.value.off/subTitle": "不对系统过载采取预防措施",
"modelLoadingGuardrails.value.off/detail": "关闭详情",
"modelLoadingGuardrails.value.low": "宽松",
- "modelLoadingGuardrails.value.low/subTitle": "轻微预防系统过载",
+ "modelLoadingGuardrails.value.low/subTitle": "对系统过载采取轻微预防措施",
"modelLoadingGuardrails.value.low/detail": "宽松详情",
"modelLoadingGuardrails.value.medium": "平衡",
- "modelLoadingGuardrails.value.medium/subTitle": "适度预防系统过载",
+ "modelLoadingGuardrails.value.medium/subTitle": "对系统过载采取适度预防措施",
"modelLoadingGuardrails.value.medium/detail": "平衡详情",
"modelLoadingGuardrails.value.high": "严格",
- "modelLoadingGuardrails.value.high/subTitle": "强烈预防系统过载",
+ "modelLoadingGuardrails.value.high/subTitle": "对系统过载采取强力预防措施",
"modelLoadingGuardrails.value.high/detail": "严格详情",
"modelLoadingGuardrails.value.custom": "自定义",
- "modelLoadingGuardrails.value.custom/subTitle": "设置最大可加载模型大小的自定义限制",
+  "modelLoadingGuardrails.value.custom/subTitle": "自定义可加载模型大小的上限",
"modelLoadingGuardrails.value.custom/detail": "自定义详情",
"modelLoadingGuardrails.custom.label": "内存限制:",
"modelLoadingGuardrails.custom.unitGB": "GB",
- "modelLoadingGuardrails.custom.description": "为模型加载设置自定义内存限制。如果加载模型将超过此限制,则不会加载模型。",
+ "modelLoadingGuardrails.custom.description": "设置自定义内存限制以加载模型。如果加载模型会超过此限制,则不会加载模型。",
"experimentalLoadPresets": "在预设中启用模型加载配置支持",
- "experimentalLoadPresets.description": "是否允许预设包含模型加载配置。此功能尚处于试验阶段,我们欢迎反馈。",
+ "experimentalLoadPresets.description": "是否允许预设包含模型加载配置。此功能为实验性,我们欢迎反馈。",
- "promptWhenCommittingUnsavedChangesWithNewFields": "提交新字段到预设时显示确认对话框",
- "promptWhenCommittingUnsavedChangesWithNewFields.description": "如果您想避免意外向预设添加新字段,这将非常有用。",
+ "unloadPreviousJITModelOnLoad": "JIT 模型自动驱逐:确保在任何时间点最多只有一个通过 JIT 加载的模型(卸载之前的模型)",
+ "autoUpdateExtensionPacks": "自动更新选定的运行环境扩展包",
+ "useHFProxy.label": "使用 LM Studio 的 Hugging Face 代理",
+ "useHFProxy.hint": "使用 LM Studio 的 Hugging Face 代理来搜索和下载模型。这可以帮助那些无法直接访问 Hugging Face 的用户。",
+ "separateReasoningContentInResponses": "在适用的情况下,在 API 响应中分离 `reasoning_content` 和 `content`",
+  "separateReasoningContentInResponses/hint": "此设置仅适用于 DeepSeek R1 及其蒸馏变体等“推理”模型,以及其他在 `<think>` 和 `</think>` 标签中生成 CoT 的模型。",
- "autoStartOnLogin": "登录时自动启动LLM服务",
- "autoStartOnLogin.description": "当您登录计算机时自动启动LLM服务",
+ "promptWhenCommittingUnsavedChangesWithNewFields": "预设:在将新字段提交到预设时显示确认对话框",
+ "promptWhenCommittingUnsavedChangesWithNewFields.description": "如果您希望防止意外将新字段添加到预设中,此功能非常有用",
- "expandConfigsOnClick": "点击而非悬停时展开配置",
+ "enableLocalService": "启用本地 LLM 服务",
+ "enableLocalService.subtitle": "无需保持 LM Studio 应用程序打开即可使用 LM Studio 的 LLM 服务器",
+ "enableLocalService.description": "启用后,LM Studio 本地 LLM 服务将在启动时启动。关闭 LM Studio 后,本地 LLM 服务将继续在系统托盘中运行。",
+
+ "expandConfigsOnClick": "单击展开配置,而不是悬停",
"migrateChats": {
"label": "迁移 0.3.0 之前的聊天记录",
@@ -138,22 +162,22 @@
"action_other": "迁移 {{count}} 条聊天记录",
"inProgress": "正在迁移聊天记录...",
"hint": {
- "primary": "我们对 v0.3.0+ 版本的聊天记录内部数据结构进行了改造,以支持多版本聊天消息等功能。为了让旧聊天记录出现在应用中,需要将其迁移到新格式。",
- "details": "迁移过程不会删除您的旧聊天记录,而是会创建一个新格式的副本。",
+ "primary": "我们为 v0.3.0+ 聊天记录改进了内部数据结构,以支持多版本聊天消息等功能。为了让旧聊天记录出现在应用程序中,需要将它们迁移到新格式。",
+ "details": "迁移过程不会删除您的旧聊天记录,而是以新格式复制它们。",
"footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前,图片不会自动迁移。"
},
"hasBetterHint": {
- "primary": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您想要再次运行它吗?",
- "details": "迁移过程将创建一个包含新迁移聊天记录的新文件夹。您的旧聊天记录将保持不变。",
+ "primary": "自从您上次迁移旧聊天记录以来,我们改进了聊天迁移工具。您想再次运行它吗?",
+ "details": "迁移过程将创建一个新文件夹以包含新迁移的聊天记录。您的旧聊天记录将保持不变。",
"footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前,图片不会自动迁移。"
},
- "success": "聊天记录迁移成功!",
- "success_one": "1 条聊天记录迁移成功",
- "success_other": "{{count}} 条聊天记录迁移成功",
- "showInstructionsButton": "显示指南",
- "footerCardText": "来自 LM Studio 早期版本的聊天记录需要迁移才能在此版本中使用。",
- "hasBetterFooterCardText": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您可以重新运行迁移过程。(我们将创建一个包含新迁移聊天记录的新文件夹。)",
- "dismissConfirm": "关闭",
+ "success": "成功迁移聊天记录!",
+ "success_one": "成功迁移 1 条聊天记录",
+ "success_other": "成功迁移 {{count}} 条聊天记录",
+ "showInstructionsButton": "显示说明",
+ "footerCardText": "来自以前版本的 LM Studio 的聊天记录需要迁移才能在此版本中使用。",
+ "hasBetterFooterCardText": "自从您上次迁移旧聊天记录以来,我们改进了聊天迁移工具。您可以重新运行迁移过程。(我们将创建一个新文件夹以包含新迁移的聊天记录。)",
+ "dismissConfirm": "忽略",
"dismissConfirmDescription": "您随时可以在设置中处理聊天记录迁移"
}
}
diff --git a/zh-CN/shared.json b/zh-CN/shared.json
index 2928ec08..67ca751e 100644
--- a/zh-CN/shared.json
+++ b/zh-CN/shared.json
@@ -1,5 +1,38 @@
{
"copyLmStudioLinkButton/toolTip": "复制模型下载链接",
-
- "filter.noMatches": "没有匹配项"
-}
\ No newline at end of file
+
+ "filter.noMatches": "无匹配项",
+ "longRunningTask": {
+ "unbundlingDependencies": {
+ "badge": "正在解压资源"
+ },
+ "performingBackendHardwareSurvey": {
+ "badge": "正在检查运行环境兼容性"
+ },
+ "indexingRuntimes": {
+ "badge": "正在索引运行环境"
+ },
+ "indexingModels": {
+ "badge": "正在索引模型"
+ },
+ "authenticating": {
+ "badge": "正在认证"
+ },
+ "autoUpdatingExtensionPack": {
+ "badge": "正在更新扩展包 ({{name}} v{{version}})"
+ }
+ },
+ "auth": {
+ "prompt": "登录到 LM Studio Hub",
+ "authError": "认证失败",
+ "noAccount": "还没有账户?",
+ "signUp": "注册",
+ "havingTrouble": "遇到问题?",
+ "retry": "重试"
+ },
+ "artifacts": {
+ "fetchError": "获取构件失败"
+ },
+ "incompatible": "不兼容",
+ "compatible": "兼容"
+}
diff --git a/zh-CN/sidebar.json b/zh-CN/sidebar.json
index ff7f6346..f548032a 100644
--- a/zh-CN/sidebar.json
+++ b/zh-CN/sidebar.json
@@ -3,7 +3,7 @@
"discover": "发现",
"myModels": "我的模型",
"developer": "开发者",
- "runtimes": "运行时间",
+ "runtimes": "运行环境",
"settings": "设置",
"download": "下载"
}