@@ -32,16 +32,22 @@ size_t LegacyExecutor::requestsQueueSize() {
 
 void LegacyExecutor::processRequest() {
     OVMS_PROFILE_FUNCTION();
-    SPDLOG_LOGGER_TRACE(llm_executor_logger, "Generation started");
-    try {
-        requests.front()->results = pipe->generate(requests.front()->inputIds, requests.front()->generationConfigBuilder->getConfig(), requests.front()->textStreamer);
-    } catch (std::exception& e) {
-        requests.front()->success = false;
-        SPDLOG_LOGGER_ERROR(llm_executor_logger, "LLM pipeline generation failed: {}.", e.what());
+    auto& requestExecutionContext = requests.front();
+    if (requestExecutionContext->clientDisconnected) {
+        requestExecutionContext->success = false;
+        SPDLOG_LOGGER_DEBUG(llm_executor_logger, "Client disconnected, skipping request processing.");
+    } else {
+        SPDLOG_LOGGER_TRACE(llm_executor_logger, "Generation started");
+        try {
+            requestExecutionContext->results = pipe->generate(requestExecutionContext->inputIds, requestExecutionContext->generationConfigBuilder->getConfig(), requestExecutionContext->textStreamer);
+        } catch (std::exception& e) {
+            requestExecutionContext->success = false;
+            SPDLOG_LOGGER_ERROR(llm_executor_logger, "LLM pipeline generation failed: {}.", e.what());
+        }
+        SPDLOG_LOGGER_TRACE(llm_executor_logger, "Generation ended");
     }
-    SPDLOG_LOGGER_TRACE(llm_executor_logger, "Generation ended");
-    requests.front()->readySignal.set_value();
-    requests.front()->executionInProgress.notify_one();
+    requestExecutionContext->readySignal.set_value();
+    requestExecutionContext->executionInProgress.notify_one();
     std::unique_lock<std::mutex> lock(queueMutex);
     requests.pop();
 }
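
Below is a minimal, self-contained C++ sketch of the signaling pattern this change relies on. It is not the OVMS implementation: `RequestContext`, `Worker`, and `enqueue` are hypothetical stand-ins, while `clientDisconnected`, `success`, `results`, `readySignal`, `requests`, and `queueMutex` mirror the names in the diff. The point it illustrates is why `readySignal.set_value()` stays outside the if/else: the promise must be fulfilled on every path, including the disconnected-client skip, so a waiter blocked on the corresponding future is always released.

```cpp
#include <atomic>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <string>

// Hypothetical stand-in for the request execution context in the diff;
// only the members the pattern needs are modeled here.
struct RequestContext {
    std::atomic<bool> clientDisconnected{false};
    bool success{true};
    std::string results;
    std::promise<void> readySignal;  // fulfilled exactly once per request
};

class Worker {
    std::queue<std::shared_ptr<RequestContext>> requests;
    std::mutex queueMutex;

public:
    void enqueue(std::shared_ptr<RequestContext> ctx) {
        std::unique_lock<std::mutex> lock(queueMutex);
        requests.push(std::move(ctx));
    }

    // Mirrors the control flow of processRequest() in the diff.
    void processRequest() {
        auto& ctx = requests.front();
        if (ctx->clientDisconnected) {
            ctx->success = false;            // client gone: skip generation entirely
        } else {
            ctx->results = "generated text"; // placeholder for pipe->generate(...)
        }
        ctx->readySignal.set_value();        // signal on BOTH paths, or waiters hang
        std::unique_lock<std::mutex> lock(queueMutex);
        requests.pop();
    }
};

int main() {
    Worker worker;
    auto ctx = std::make_shared<RequestContext>();
    auto ready = ctx->readySignal.get_future();
    ctx->clientDisconnected = true;          // simulate the client disconnecting
    worker.enqueue(ctx);
    worker.processRequest();
    ready.wait();                            // returns: the skip path still signaled
    std::cout << (ctx->success ? "ok" : "skipped") << "\n";  // prints "skipped"
}
```

The real code additionally notifies an `executionInProgress` condition variable after setting the promise; it is omitted above to keep the sketch short, but it follows the same rule: completion must be signaled whether generation ran, threw, or was skipped.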