Skip to content
Open
Show file tree
Hide file tree
Changes from 30 commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
a8e59a7
Iris: add MCQ generation instructions to chat system prompts
alretum Mar 8, 2026
543fa90
Show loading message during question generation
alretum Mar 10, 2026
f52bef4
Add multi-question generation instructions to system prompts
alretum Mar 10, 2026
cf2728a
Add tests for MCQ generation prompt block rendering in chat templates
alretum Mar 11, 2026
e2e3d09
Add MCQ generation documentation to Iris README
alretum Mar 11, 2026
ef6ac18
Fix Pyris startup command
alretum Mar 11, 2026
7956aba
Merge branch 'feature/iris/quiz-questions' into feature/iris/quiz-que…
alretum Mar 11, 2026
a82fc03
Merge branch 'main' into feature/iris/quiz-questions
alretum Mar 15, 2026
db32adb
Merge remote-tracking branch 'origin/feature/iris/quiz-questions' int…
alretum Mar 15, 2026
cab6871
Add chatMessage field to StageDTO for dynamic chat update
alretum Mar 15, 2026
5db4303
Extend in_progress() to accept chat_message parameter
alretum Mar 15, 2026
e7a74e3
Add dedicated MCQ generation prompt template
alretum Mar 15, 2026
1cfeef1
Add MCQ generation subpipeline
alretum Mar 15, 2026
1195ab6
Add MCQ generation tool for agent use
alretum Mar 15, 2026
c0fb7d0
Register MCQ tool in course chat pipeline
alretum Mar 15, 2026
4ce15ce
Register MCQ tool in lecture chat pipeline
alretum Mar 15, 2026
afa2f8f
Replace MCQ prompt blocks with tool usage instructions
alretum Mar 15, 2026
29c21ff
Simplify MCQ intent detection after tool migration
alretum Mar 15, 2026
9e9460f
Add parallel MCQ generation with background thread and queue-based de…
alretum Mar 15, 2026
3c7a82a
Add tests for parallel MCQ generation and prompt rendering
alretum Mar 15, 2026
7633265
Optimize MCQ generation with parallel workers, faster model, and impr…
alretum Mar 15, 2026
7b06311
Fix MCQ count accuracy by capping results and clarifying worker prompts
alretum Mar 15, 2026
c978688
Add graceful fallback for Cohere rerank failures
alretum Mar 18, 2026
79b8fc2
Add lecture content and citations to MCQ generation
alretum Mar 18, 2026
5efc82e
Retrieve lecture content and add citations for parallel MCQ
alretum Mar 18, 2026
3668bc8
Add fallback message to user if MCQ generation fails
alretum Mar 18, 2026
9b262cf
Pass lecture content to MCQ tool to prevent hallucinated questions
alretum Mar 19, 2026
409e5ab
Merge main into feature/iris/quiz-questions-v2
alretum Mar 23, 2026
fa24aef
Fix StageDTO silently dropping chat_message by enabling populate_by_name
alretum Mar 24, 2026
1a1a45f
Pass lecture context to MCQ tool in non-parallel branch of course cha…
alretum Mar 24, 2026
c071ab9
Merge branch 'main' into feature/iris/quiz-questions-v2
alretum Mar 24, 2026
04d5905
Restore missing json import in course chat pipeline
alretum Mar 24, 2026
e7d0526
Add MCQ citation enrichment to non-parallel tool fallback path
alretum Mar 24, 2026
b4fe988
Guard against non-dict JSON in MCQ citation processing
alretum Mar 24, 2026
98f4e1d
Guard MCQ token tracking against still-running thread
alretum Mar 25, 2026
75eb003
Extract shared MCQ logic into mixin and fix lecture chat slowness
alretum Mar 31, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions iris/.claudeignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
application.example.yml
application.local.yml
llm_config.local.yml
llm_config.example.yml
1 change: 1 addition & 0 deletions iris/src/iris/common/pipeline_enum.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,5 +24,6 @@ class PipelineEnum(str, Enum):
IRIS_LECTURE_SUMMARY_PIPELINE = "IRIS_LECTURE_SUMMARY_PIPELINE"
IRIS_TUTOR_SUGGESTION_PIPELINE = "IRIS_TUTOR_SUGGESTION_PIPELINE"
IRIS_SESSION_TITLE_GENERATION_PIPELINE = "IRIS_SESSION_TITLE_GENERATION_PIPELINE"
IRIS_MCQ_GENERATION_PIPELINE = "IRIS_MCQ_GENERATION_PIPELINE"
IRIS_LECTURE_SEARCH_ANSWER_PIPELINE = "IRIS_LECTURE_SEARCH_ANSWER_PIPELINE"
NOT_SET = "NOT_SET"
5 changes: 4 additions & 1 deletion iris/src/iris/domain/status/stage_dto.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,18 @@
from typing import Optional

from pydantic import BaseModel, Field
from pydantic import BaseModel, ConfigDict, Field

from iris.domain.status.stage_state_dto import StageStateEnum


class StageDTO(BaseModel):
    """A single status stage of a pipeline run, reported to the client.

    Serialized with camelCase aliases where set (e.g. ``chatMessage``).
    """

    # Allow populating aliased fields by their Python field name as well
    # as by the alias; without this, constructing StageDTO with
    # ``chat_message=...`` would be rejected/dropped because the field
    # declares an alias (``chatMessage``).
    model_config = ConfigDict(populate_by_name=True)

    # Human-readable stage name shown in the UI (optional).
    name: Optional[str] = None
    # Relative weight of this stage within the overall progress.
    weight: int
    # Current state of the stage (e.g. in progress, done, error).
    state: StageStateEnum
    # Optional status message describing the stage's progress or outcome.
    message: Optional[str] = None
    internal: bool = Field(
        default=False
    )  # An internal stage is not shown in the UI and hidden from the user
    # Optional chat message sent to the user while this stage runs;
    # serialized to the client under the alias "chatMessage".
    chat_message: Optional[str] = Field(alias="chatMessage", default=None)
36 changes: 27 additions & 9 deletions iris/src/iris/llm/request_handler/rerank_request_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,15 @@
from langchain_core.tools import BaseTool
from pydantic import BaseModel, ConfigDict

from iris.common.logging_config import get_logger
from iris.common.pyris_message import PyrisMessage
from iris.domain.data.image_message_content_dto import ImageMessageContentDTO
from iris.llm import CompletionArguments, RequestHandler
from iris.llm.external.model import LanguageModel
from iris.llm.llm_manager import LlmManager

logger = get_logger(__name__)


class RerankRequestHandler(RequestHandler):
"""RerankRequestHandler handles document reranking based on a query and a specified content field using a language
Expand All @@ -17,6 +20,7 @@ class RerankRequestHandler(RequestHandler):

model_id: str
llm_manager: LlmManager | None = None
_rerank_available: bool = True
model_config = ConfigDict(arbitrary_types_allowed=True)

def __init__(self, model_id: str):
Expand Down Expand Up @@ -63,18 +67,32 @@ def rerank(self, query, documents: List, top_n: int, content_field_name: str):
if not valid_documents:
return []

# Skip Cohere entirely after first failure to avoid repeated timeout delays
if not self._rerank_available:
return valid_documents[:top_n]

document_contents = list(
map(lambda x: getattr(x, content_field_name), valid_documents)
)

cohere_client = self.llm_manager.get_llm_by_id(self.model_id)

_, reranked_results, _ = cohere_client.rerank(
query=query,
documents=document_contents,
top_n=top_n,
)
ranked_documents = []
for result in reranked_results[1]:
ranked_documents.append(valid_documents[result.index])
return ranked_documents
try:
_, reranked_results, _ = cohere_client.rerank(
query=query,
documents=document_contents,
top_n=top_n,
)
ranked_documents = []
for result in reranked_results[1]:
ranked_documents.append(valid_documents[result.index])
return ranked_documents
except Exception as e:
logger.warning(
"Reranking failed, disabling for subsequent calls. "
"Returning top %d unranked documents: %s",
top_n,
str(e),
)
RerankRequestHandler._rerank_available = False
return valid_documents[:top_n]
Loading
Loading