diff --git a/web-app/src/containers/ChatInput.tsx b/web-app/src/containers/ChatInput.tsx
index ffa9a0245c..f488516994 100644
--- a/web-app/src/containers/ChatInput.tsx
+++ b/web-app/src/containers/ChatInput.tsx
@@ -27,12 +27,15 @@
 import { useGeneralSetting } from '@/hooks/useGeneralSetting'
 import { useModelProvider } from '@/hooks/useModelProvider'
 import { useAppState } from '@/hooks/useAppState'
-import { MovingBorder } from './MovingBorder'
 import { useChat } from '@/hooks/useChat'
+import { MovingBorder } from './MovingBorder'
 import DropdownModelProvider from '@/containers/DropdownModelProvider'
 import { ModelLoader } from '@/containers/loaders/ModelLoader'
 import DropdownToolsAvailable from '@/containers/DropdownToolsAvailable'
 import { useServiceHub } from '@/hooks/useServiceHub'
+import { getConnectedServers } from '@/services/mcp'
+import { useRouter } from '@tanstack/react-router'
+import { route } from '@/constants/routes'
 
 type ChatInputProps = {
   className?: string
@@ -52,14 +55,32 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
     loadingModel,
     tools,
     cancelToolCall,
+    addToThreadQueue,
+    getThreadQueueLength,
+    setThreadPrompt,
+    getThreadPrompt,
   } = useAppState()
-  const { prompt, setPrompt } = usePrompt()
-  const { currentThreadId } = useThreads()
+  const { prompt: globalPrompt, setPrompt: setGlobalPrompt } = usePrompt()
+  const { currentThreadId, createThread } = useThreads()
+
+  // Use thread-aware prompt state
+  const prompt = currentThreadId
+    ? getThreadPrompt(currentThreadId)
+    : globalPrompt
+  const setPrompt = currentThreadId
+    ? (value: string) => setThreadPrompt(currentThreadId, value)
+    : setGlobalPrompt
   const { t } = useTranslation()
   const { spellCheckChatInput } = useGeneralSetting()
+  const router = useRouter()
   const maxRows = 10
 
+  // Get current thread's queue information
+  const currentThreadQueueLength = currentThreadId
+    ? getThreadQueueLength(currentThreadId)
+    : 0
+
   const { selectedModel, selectedProvider } = useModelProvider()
   const { sendMessage } = useChat()
   const [message, setMessage] = useState('')
@@ -105,7 +126,9 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
       try {
         // Only check mmproj for llamacpp provider
         if (selectedProvider === 'llamacpp') {
-          const hasLocalMmproj = await serviceHub.models().checkMmprojExists(selectedModel.id)
+          const hasLocalMmproj = await serviceHub
+            .models()
+            .checkMmprojExists(selectedModel.id)
           setHasMmproj(hasLocalMmproj)
         }
         // For non-llamacpp providers, only check vision capability
@@ -130,7 +153,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
   // Check if there are active MCP servers
   const hasActiveMCPServers = connectedServers.length > 0 || tools.length > 0
 
-  const handleSendMesage = (prompt: string) => {
+  const handleSendMessage = async (prompt: string) => {
     if (!selectedModel) {
       setMessage('Please select a model to start chatting.')
       return
@@ -139,12 +162,42 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
       return
     }
     setMessage('')
-    sendMessage(
-      prompt,
-      true,
-      uploadedFiles.length > 0 ? uploadedFiles : undefined
-    )
-    setUploadedFiles([])
+
+    // Create thread if none exists, otherwise use current thread
+    if (!currentThreadId) {
+      try {
+        // Create a new thread for the initial message
+        if (!selectedModel) {
+          setMessage('Please select a model to create a new conversation.')
+          return
+        }
+        const threadModel: ThreadModel = {
+          id: selectedModel.id,
+          provider: selectedProvider,
+        }
+        const newThread = await createThread(
+          threadModel,
+          prompt.trim().slice(0, 50)
+        )
+
+        // Navigate to the new thread
+        router.navigate({
+          to: route.threadsDetail,
+          params: { threadId: newThread.id },
+        })
+
+        // Queue the message after navigation
+        addToThreadQueue(newThread.id, prompt.trim())
+      } catch (error) {
+        console.error('Failed to create thread:', error)
+        setMessage('Failed to create new conversation.')
+        return
+      }
+    } else {
+      // Always queue messages - let scheduler decide when to process
+      addToThreadQueue(currentThreadId, prompt.trim())
+    }
+    setPrompt('')
   }
 
   useEffect(() => {
@@ -543,12 +596,11 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
       )}
       <TextareaAutosize
        onChange={(e) => {
          setPrompt(e.target.value)
          // Count the number of newlines to estimate rows
@@ -559,17 +611,11 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
          // e.keyCode 229 is for IME input with Safari
          const isComposing = e.nativeEvent.isComposing || e.keyCode === 229
 
-          if (
-            e.key === 'Enter' &&
-            !e.shiftKey &&
-            prompt.trim() &&
-            !isComposing
-          ) {
+          if (e.key === 'Enter' && !isComposing && !e.shiftKey) {
            e.preventDefault()
-            // Submit the message when Enter is pressed without Shift
-            handleSendMesage(prompt)
-            // When Shift+Enter is pressed, a new line is added (default behavior)
+            handleSendMessage(prompt) // Use same handler as send button
          }
+          // Shift+Enter: Allow default behavior (new line)
        }}
        onPaste={handlePaste}
        placeholder={t('common:placeholder.chatInput')}
@@ -748,6 +794,16 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
+          {/* Enhanced Queue Indicator */}
+          {currentThreadQueueLength > 0 && (
+            <div>
+              {currentThreadQueueLength} message
+              {currentThreadQueueLength === 1 ? '' : 's'} queued
+            </div>
+          )}
+
           {streamingContent ? (
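Reviewer note: the queueing API this patch consumes (`addToThreadQueue`, `getThreadQueueLength`, `setThreadPrompt`, `getThreadPrompt`) is called here but not defined in the diff. For context, here is a minimal sketch of what that `useAppState` slice could look like, assuming a Zustand-style store as the `use*` hooks suggest; every name and shape below is inferred from the call sites above, not taken from the repo's actual implementation:

```ts
// Hypothetical sketch only: shapes inferred from the call sites in this diff.
import { create } from 'zustand'

interface ThreadQueueSlice {
  threadQueues: Record<string, string[]> // threadId -> queued prompts
  threadPrompts: Record<string, string> // threadId -> per-thread draft input
  addToThreadQueue: (threadId: string, message: string) => void
  getThreadQueueLength: (threadId: string) => number
  setThreadPrompt: (threadId: string, value: string) => void
  getThreadPrompt: (threadId: string) => string
}

export const useAppState = create<ThreadQueueSlice>()((set, get) => ({
  threadQueues: {},
  threadPrompts: {},
  // Append a message to the per-thread FIFO queue
  addToThreadQueue: (threadId, message) =>
    set((state) => ({
      threadQueues: {
        ...state.threadQueues,
        [threadId]: [...(state.threadQueues[threadId] ?? []), message],
      },
    })),
  getThreadQueueLength: (threadId) =>
    (get().threadQueues[threadId] ?? []).length,
  // Keep a separate draft prompt per thread so switching threads
  // preserves unsent input
  setThreadPrompt: (threadId, value) =>
    set((state) => ({
      threadPrompts: { ...state.threadPrompts, [threadId]: value },
    })),
  getThreadPrompt: (threadId) => get().threadPrompts[threadId] ?? '',
}))
```

Under this reading, the component no longer calls `sendMessage` directly on submit; a scheduler elsewhere would drain `threadQueues` and dispatch each entry, which is what the "Always queue messages - let scheduler decide when to process" comment implies.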