13 changes: 7 additions & 6 deletions education-ai-suite/smart-classroom/README.md
@@ -1,14 +1,15 @@
# 🎓 Smart Classroom

The **Smart Classroom** project is a modular, extensible framework designed to process and summarize educational content using advanced AI models. It supports transcription, summarization, and future capabilities like video understanding and real-time analysis.
The **Smart Classroom** project is a modular, extensible framework designed to process and summarize educational content using advanced AI models. It supports transcription, summarization, mindmap generation, and future capabilities like video understanding and real-time analysis.

The main features are as follows:

- Audio transcription with ASR models (e.g., Whisper, Paraformer)
- Summarization using powerful LLMs (e.g., Qwen, LLaMA)
- Plug-and-play architecture for integrating new ASR and LLM models
- API-first design ready for frontend integration
- Extensible roadmap for real-time streaming, diarization, translation, and video analysis
• Audio transcription with ASR models (e.g., Whisper, Paraformer)\
• Summarization using powerful LLMs (e.g., Qwen, LLaMA)\
• MindMap Generation using Mermaid.js for visual diagram rendering of the summary\
• Plug-and-play architecture for integrating new ASR and LLM models\
• API-first design ready for frontend integration\
• Extensible roadmap for real-time streaming, diarization, translation, and video analysis

![Smart Classroom UI](./docs/user-guide/images/smart_classroom_ui.png)

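The MindMap Generation feature listed above asks the LLM for Mermaid `mindmap` text (see the prompt added to `config.yaml` further down in this diff), which the frontend can render with Mermaid.js. A representative output, with illustrative topic names only, looks like this:

```mermaid
mindmap
  root((Photosynthesis))
    Light reactions
      Chlorophyll absorbs light
      ATP and NADPH produced
    Calvin cycle
      CO2 fixation
      Glucose synthesis
```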
18 changes: 18 additions & 0 deletions education-ai-suite/smart-classroom/api/endpoints.py
@@ -96,6 +96,24 @@ async def event_stream():

    return StreamingResponse(event_stream(), media_type="application/json")

@router.post("/mindmap")
async def generate_mindmap(request: SummaryRequest):
    if audio_pipeline_lock.locked():
        raise HTTPException(status_code=429, detail="Session Active, Try Later")
    pipeline = Pipeline(request.session_id)
    try:
        mindmap_text = pipeline.run_mindmap()
        logger.info("Mindmap generated successfully.")
        return {"mindmap": mindmap_text, "error": ""}
    except HTTPException as http_exc:
        raise http_exc
    except Exception as e:
        logger.exception(f"Error during mindmap generation: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Mindmap generation failed: {e}"
        )

@router.get("/performance-metrics")
def get_summary_metrics(session_id: Optional[str] = Header(None, alias="session_id")):
    project_config = RuntimeConfig.get_section("Project")
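For reference, a client call to the new endpoint could look like the sketch below. It assumes the FastAPI app serves this router at `http://localhost:8000` with no path prefix; the host, port, and timeout are assumptions, while the `session_id` body field and the `{"mindmap": ..., "error": ...}` response shape come from the code above.

```python
# Illustrative client for POST /mindmap (not part of the PR).
import requests

resp = requests.post(
    "http://localhost:8000/mindmap",               # assumed base URL
    json={"session_id": "<existing-session-id>"},  # session that already has a summary
    timeout=600,                                   # generation can be slow on local hardware
)
resp.raise_for_status()            # raises on 429 (session active) or 500 (generation failure)
print(resp.json()["mindmap"])      # Mermaid text, also saved server-side as mindmap.mmd
```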
82 changes: 82 additions & 0 deletions education-ai-suite/smart-classroom/components/mindmap_component.py
@@ -0,0 +1,82 @@
from components.base_component import PipelineComponent
from components.llm.openvino.summarizer import Summarizer as OvSummarizer
from components.llm.ipex.summarizer import Summarizer as IpexSummarizer
from utils.runtime_config_loader import RuntimeConfig
from utils.config_loader import config
from utils.storage_manager import StorageManager
import logging, os

logger = logging.getLogger(__name__)

class MindmapComponent(PipelineComponent):
    _model = None
    _config = None

    def __init__(self, session_id, provider, model_name, device, temperature=0.7):
        self.session_id = session_id
        provider = provider.lower()
        config_key = (provider, model_name, device)

        if MindmapComponent._model is None or MindmapComponent._config != config_key:
            if provider == "openvino":
                MindmapComponent._model = OvSummarizer(
                    model_name=model_name,
                    device=device,
                    temperature=temperature,
                    revision=None
                )
            elif provider == "ipex":
                MindmapComponent._model = IpexSummarizer(
                    model_name=model_name,
                    device=device.lower(),
                    temperature=temperature
                )
            else:
                raise ValueError(f"Unsupported summarizer provider: {provider}")

            MindmapComponent._config = config_key

        self.summarizer = MindmapComponent._model
        self.model_name = model_name
        self.provider = provider

    def _get_mindmap_message(self, input_text):
        lang_prompt = vars(config.mindmap.system_prompt)
        logger.debug(f"Mindmap System Prompt: {lang_prompt.get(config.models.summarizer.language)}")
        return [
            {"role": "system", "content": f"{lang_prompt.get(config.models.summarizer.language)}"},
            {"role": "user", "content": f"{input_text}"}
        ]

    def generate_mindmap(self, summary_text):
        """
        Generate a complete mindmap string (non-streaming).
        """
        project_config = RuntimeConfig.get_section("Project")
        project_path = os.path.join(
            project_config.get("location"),
            project_config.get("name"),
            self.session_id
        )
        mindmap_path = os.path.join(project_path, "mindmap.mmd")

        try:
            logger.info("Generating mindmap from summary...")
            mindmap_prompt = self.summarizer.tokenizer.apply_chat_template(
                self._get_mindmap_message(summary_text),
                tokenize=False,
                add_generation_prompt=True
            )

            # Generate tokens
            mindmap_streamer = self.summarizer.generate(mindmap_prompt)
            full_mindmap = "".join(token for token in mindmap_streamer)

            # Save full mindmap to file
            StorageManager.save(mindmap_path, full_mindmap, append=False)
            logger.info("Mindmap generation completed successfully.")
            return full_mindmap

        except Exception as e:
            logger.error(f"Mindmap generation failed: {e}")
            raise e
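A note on the design: the component caches one summarizer instance at class level, keyed by `(provider, model_name, device)`, so repeated mindmap requests, even for different sessions, reuse the already-loaded LLM rather than reloading it. A minimal usage sketch, illustrative only, with a placeholder model id and an assumed OpenVINO/GPU setup matching `config.models.summarizer`:

```python
# Illustrative only; assumes the OpenVINO summarizer backend is set up as the
# project expects and "<model-id>" matches config.models.summarizer.name.
from components.mindmap_component import MindmapComponent

first = MindmapComponent("session-1", provider="openvino", model_name="<model-id>", device="GPU")
second = MindmapComponent("session-2", provider="openvino", model_name="<model-id>", device="GPU")

assert first.summarizer is second.summarizer   # same cached model instance is reused

mermaid_text = first.generate_mindmap("...summary markdown...")
# generate_mindmap() also writes <project location>/<project name>/session-1/mindmap.mmd
```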
@@ -51,7 +51,7 @@ def _get_message(self, input):
            {"role": "system", "content": f"{lang_prompt.get(config.models.summarizer.language)}"},
            {"role": "user", "content": f"{input}"}
        ]

    def process(self, input):
        project_config = RuntimeConfig.get_section("Project")
        project_path = os.path.join(project_config.get("location"), project_config.get("name"), self.session_id)
@@ -100,6 +100,7 @@ def process(self, input):
                "performance.end_to_end_time": f"{round(end_to_end_time, 4)}s",
            }
        )





6 changes: 6 additions & 0 deletions education-ai-suite/smart-classroom/config.yaml
@@ -32,6 +32,12 @@ models:
      zh: "你是一个课堂教学助手,根据提供的原始课堂音频的转录文本,以Markdown格式提炼出本节课的核心内容、知识点结构、讲解顺序,解释每个知识点后,用简洁清晰的语言进行总结。注意不要遗漏主要知识点,也不要捏造任何没有提及的知识点,总结要保证知识点真实准确,避免任何冗余或误导性内容。"
  model_hub: huggingface # huggingface or modelscope

mindmap:
  system_prompt:
    en: "Generate a Mermaid mindmap with proper syntax. Use exactly this format: Start with 'mindmap', then one root node with double parentheses, then child nodes with proper indentation (2 spaces per level). No bullet points, no multiple root nodes. There must be exactly ONE 'mindmap' declaration and ONE root node. Example: mindmap\n root((Main Topic))\n Child1\n Subchild1\n Subchild2\n Child2\n Subchild3"
    zh: "生成正确语法的Mermaid思维导图。只允许一个'mindmap'根节点,并以'mindmap'开头。必须且只能有一个根节点((主题)),所有内容都应在同一个思维导图中,从该根节点展开。不要重复'mindmap',不要使用多个根节点。不要使用Markdown格式(不要包含```mermaid或```)。示例:mindmap\n root((课堂主题))\n 部分一\n 子节点A\n 子节点B\n 部分二\n 子节点C"
  min_token: 20

audio_preprocessing:
  chunk_duration_sec: 30
  silence_threshold: -35 # in dB
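The new `mindmap` block is read through the project's attribute-style config loader. A sketch of how the keys are consumed, mirroring `mindmap_component.py` and `pipeline.py` in this PR (it only runs inside the smart-classroom project):

```python
# Sketch: how config.mindmap is used elsewhere in this PR.
from utils.config_loader import config

prompts = vars(config.mindmap.system_prompt)                # {"en": "...", "zh": "..."}
system_prompt = prompts.get(config.models.summarizer.language)

summary_text = "Photosynthesis converts light energy into chemical energy in plants."
if len(summary_text.split()) < config.mindmap.min_token:    # min_token = 20, counted as words
    print("Too short: run_mindmap() returns the 'Insufficient Input' stub instead of calling the LLM.")
```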
83 changes: 82 additions & 1 deletion education-ai-suite/smart-classroom/pipeline.py
@@ -5,6 +5,7 @@
import logging, os
from utils.session_manager import generate_session_id
from components.summarizer_component import SummarizerComponent
from components.mindmap_component import MindmapComponent
from utils.runtime_config_loader import RuntimeConfig
from utils.storage_manager import StorageManager
from monitoring import monitor
@@ -25,6 +26,16 @@ def __init__(self, session_id=None):
            SummarizerComponent(self.session_id, provider=config.models.summarizer.provider, model_name=config.models.summarizer.name, temperature=config.models.summarizer.temperature, device=config.models.summarizer.device)
        ]

        self.mindmap_pipeline = [
            MindmapComponent(
                self.session_id,
                provider=config.models.summarizer.provider,
                model_name=config.models.summarizer.name,
                temperature=config.models.summarizer.temperature,
                device=config.models.summarizer.device
            )
        ]

    def run_transcription(self, audio_path: str):
        project_config = RuntimeConfig.get_section("Project")
        monitor.start_monitoring(os.path.join(project_config.get("location"), project_config.get("name"), self.session_id, "utilization_logs"))
@@ -68,4 +79,74 @@ def run_summarizer(self):
                    yield token
        finally:
            monitor.stop_monitoring()

            time.sleep(3)

    def run_mindmap(self):
        """
        Generate a mindmap separately from an existing summary.md file.
        """
        project_config = RuntimeConfig.get_section("Project")
        session_dir = os.path.join(
            project_config.get("location"),
            project_config.get("name"),
            self.session_id
        )
        summary_path = os.path.join(session_dir, "summary.md")
        min_tokens = config.mindmap.min_token

        # Start resource utilization monitoring
        monitor.start_monitoring(os.path.join(session_dir, "utilization_logs"))

        try:
            summary_text = StorageManager.read_text_file(summary_path)
            if not summary_text:
                logger.error("Summary is empty. Cannot generate mindmap.")
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail="Summary is empty. Cannot generate mindmap."
                )
        except FileNotFoundError:
            logger.error(f"Invalid Session ID: {self.session_id}")
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Invalid session id: {self.session_id}, summary not found."
            )
        except HTTPException:
            # Propagate the 400 raised above instead of re-wrapping it as a 500 below.
            raise
        except Exception as e:
            logger.error(f"Unexpected error while accessing summary: {e}")
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="An unexpected error occurred while accessing the summary."
            )

        token_count = len(summary_text.split())
        logger.info(f"Summary token count: {token_count}, Minimum required: {min_tokens}")
        if token_count < min_tokens:
            logger.warning("Insufficient information to generate mindmap.")
            insufficient_mindmap = (
                "mindmap\n"
                " root((Insufficient Input))\n"
                " The summary is too short to generate a meaningful mindmap."
            )
            mindmap_path = os.path.join(session_dir, "mindmap.mmd")
            StorageManager.save(mindmap_path, insufficient_mindmap, append=False)
            monitor.stop_monitoring()
            return insufficient_mindmap

        try:
            full_mindmap = ""
            for component in self.mindmap_pipeline:
                mindmap_text = component.generate_mindmap(summary_text)
                full_mindmap += mindmap_text

            logger.info("Mindmap generation successful.")
            return full_mindmap

        except Exception as e:
            logger.error(f"Error during mindmap generation: {e}")
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Error during mindmap generation: {e}"
            )

        finally:
            monitor.stop_monitoring()
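Taken together with the endpoint above, the server-side flow reduces to the sketch below (mirrors `api/endpoints.py`; illustrative only). It assumes the session already has a `summary.md` on disk, e.g. produced by `run_transcription()` and `run_summarizer()`.

```python
# Illustrative end-to-end use of the new method (not part of the PR).
from pipeline import Pipeline

pipeline = Pipeline(session_id="<existing-session-id>")
mermaid_text = pipeline.run_mindmap()   # raises HTTPException(400) if summary.md is missing or empty
print(mermaid_text)                     # same text is saved as mindmap.mmd next to summary.md
```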