Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 28 additions & 9 deletions chatbot-core/api/prompts/prompt_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,18 +2,25 @@
Constructs the prompt used for querying the LLM, including system-level instructions,
chat history, context retrieved from the knowledge base, and the user's question.
"""

from typing import Optional
from langchain.memory import ConversationBufferMemory
from api.prompts.prompts import SYSTEM_INSTRUCTION

def build_prompt(user_query: str, context: str, memory: ConversationBufferMemory) -> str:
from api.prompts.prompts import SYSTEM_INSTRUCTION, LOG_ANALYSIS_INSTRUCTION

def build_prompt(
user_query: str,
context: str,
memory: ConversationBufferMemory,
log_context: Optional[str] = None
) -> str:
"""
Build the full prompt by combining system instructions, chat history, context,and user question.
Build the full prompt by combining system instructions, chat history, context,
user question, and optional log data.

Args:
user_query (str): The raw question from the user.
context (str): The relevant retrieved chunks to ground the answer.
memory (ConversationBufferMemory): LangChain memory holding prior chat turns.
log_context (Optional[str]): Raw logs provided by the user (e.g. build failure logs).

Returns:
str: A structured prompt for the language model.
Expand All @@ -22,17 +29,29 @@ def build_prompt(user_query: str, context: str, memory: ConversationBufferMemory
history = "\n".join(
f"{'User' if msg.type == 'human' else 'Jenkins Assistant'}: {msg.content or ''}"
for msg in memory.chat_memory.messages
) if memory.chat_memory.messages else ""
) if memory.chat_memory.messages else ""
else:
history = ""

prompt = f"""{SYSTEM_INSTRUCTION}
# If log context exists, we append it as a specific section
if log_context:
system_prompt = LOG_ANALYSIS_INSTRUCTION
log_section = f"""
User-Provided Log Data:
{log_context}
"""
else:
# Otherwise, use the standard Friendly Assistant prompt
system_prompt = SYSTEM_INSTRUCTION
log_section = ""

prompt = f"""{system_prompt}
Chat History:
{history}

Context:
Context (Documentation & Knowledge Base):
{context}

{log_section}
User Question:
{user_query.strip()}

Expand Down
13 changes: 13 additions & 0 deletions chatbot-core/api/prompts/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,3 +258,16 @@

Relevance Analysis:
"""
# System prompt used in place of SYSTEM_INSTRUCTION when the incoming
# message carries raw build logs (the chat service detects this with a
# regex on the user input). It forces a terse, root-cause-first analysis
# instead of the conversational assistant persona.
LOG_ANALYSIS_INSTRUCTION = """
You are an expert Jenkins Log Analyzer.

Your SOLE goal is to identify the root cause of the build failure based on the provided logs.

PRIORITY RULES:
1. Focus 100% on the "User-Provided Log Data".
2. Identify specific error messages, exceptions, or exit codes (e.g., "Build step 'Execute Windows batch command' marked build as failure").
3. Only use the "Context" (documentation) if it helps explain the specific error found in the logs. If the context is unrelated, IGNORE IT.
4. Do not be conversational. Go straight to the point: "The build failed because..."

If the logs do not show a clear error, state: "I cannot find a specific error in the provided logs."
"""
27 changes: 25 additions & 2 deletions chatbot-core/api/services/chat_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,11 @@
retrieval_config = CONFIG["retrieval"]
CODE_BLOCK_PLACEHOLDER_PATTERN = r"\[\[(?:CODE_BLOCK|CODE_SNIPPET)_(\d+)\]\]"

# Matches the message the frontend builds after scraping the Jenkins
# console ("... Here are the last <N> characters of the log: ```<log>``` <question>").
# Group 1 captures the fenced log text; group 2 captures the trailing
# user question (may be empty). re.DOTALL lets both groups span newlines.
# NOTE(review): group 1 is non-greedy, so a log that itself contains a
# ``` fence will be cut short at the first closing fence — confirm this
# is acceptable for scraped console output.
LOG_ANALYSIS_PATTERN = re.compile(
    r"Here are the last \d+ characters of the log:\s*```\s*(.*?)\s*```\s*(.*)",
    re.DOTALL
)


def get_chatbot_reply(
session_id: str,
Expand All @@ -52,6 +57,24 @@ def get_chatbot_reply(
"""
logger.info("New message from session '%s'", session_id)
logger.info("Handling the user query: %s", user_input)

match = LOG_ANALYSIS_PATTERN.search(user_input)

actual_query = user_input
log_context = None

if match:
logger.info("Log Analysis Pattern detected. Separating logs from query.")
log_content = match.group(1).strip()
user_question = match.group(2).strip()

if not user_question:
user_question = "Please analyze the build failure in these logs."

log_context = log_content
actual_query = user_question

logger.info("Handling the user query: %s", actual_query)

if files:
logger.info("Processing %d uploaded file(s)", len(files))
Expand All @@ -61,7 +84,7 @@ def get_chatbot_reply(
raise RuntimeError(
f"Session '{session_id}' not found in the memory store.")

context = retrieve_context(user_input)
context = retrieve_context(actual_query)
logger.info("Context retrieved: %s", context)

# Add file context if files are provided
Expand All @@ -73,7 +96,7 @@ def get_chatbot_reply(
logger.info("File context added: %d characters", len(file_context))
context = f"{context}\n\n[User Uploaded Files]\n{file_context}"

prompt = build_prompt(user_input, context, memory)
prompt = build_prompt(actual_query, context, memory, log_context=log_context)

logger.info("Generating answer with prompt: %s", prompt)
reply = generate_answer(prompt)
Expand Down
5 changes: 3 additions & 2 deletions chatbot-core/rag/embedding/bm25_indexer.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,5 +104,6 @@ def get(self, index_name: str):
logger= LoggerFactory.instance().get_logger("bm25indexer")
)

if not CONFIG["is_test_mode"]:
indexer.build()
# Build the BM25 index only when this module is executed as a script, so
# merely importing it (e.g. from the API or the test suite) stays free of
# the expensive index-build side effect.
if __name__ == "__main__":
    # Still honor the test-mode flag to avoid building during CI runs.
    if not CONFIG["is_test_mode"]:
        indexer.build()
76 changes: 67 additions & 9 deletions frontend/src/components/Chatbot.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ import {
loadChatbotLastSessionId,
} from "../utils/chatbotStorage";
import { v4 as uuidv4 } from "uuid";
import { ProactiveToast } from "./Toast";
import { useContextObserver } from "../utils/useContextObserver";

/**
* Chatbot is the core component responsible for managing the chatbot display.
Expand All @@ -31,17 +33,19 @@ export const Chatbot = () => {
const [input, setInput] = useState("");
const [sessions, setSessions] = useState<ChatSession[]>(loadChatbotSessions);
const [currentSessionId, setCurrentSessionId] = useState<string | null>(
loadChatbotLastSessionId,
loadChatbotLastSessionId
);
const [isSidebarOpen, setIsSidebarOpen] = useState<boolean>(false);
const [isPopupOpen, setIsPopupOpen] = useState<boolean>(false);
const [sessionIdToDelete, setSessionIdToDelete] = useState<string | null>(
null,
null
);
const [attachedFiles, setAttachedFiles] = useState<File[]>([]);
const [supportedExtensions, setSupportedExtensions] =
useState<SupportedExtensions | null>(null);

const { showToast, setShowToast } = useContextObserver(isOpen);

/**
* Fetch supported file extensions on component mount.
*/
Expand Down Expand Up @@ -137,8 +141,8 @@ export const Chatbot = () => {
prevSessions.map((session) =>
session.id === currentSessionId
? { ...session, messages: [...session.messages, message] }
: session,
),
: session
)
);
};

Expand Down Expand Up @@ -178,8 +182,8 @@ export const Chatbot = () => {
prevSessions.map((session) =>
session.id === currentSessionId
? { ...session, isLoading: true }
: session,
),
: session
)
);
appendMessageToCurrentSession(userMessage);

Expand All @@ -189,7 +193,7 @@ export const Chatbot = () => {
botReply = await fetchChatbotReplyWithFiles(
currentSessionId!,
trimmed || "Please analyze the attached file(s).",
filesToSend,
filesToSend
);
} else {
botReply = await fetchChatbotReply(currentSessionId!, trimmed);
Expand All @@ -199,8 +203,8 @@ export const Chatbot = () => {
prevSessions.map((session) =>
session.id === currentSessionId
? { ...session, isLoading: false }
: session,
),
: session
)
);
appendMessageToCurrentSession(botReply);
};
Expand Down Expand Up @@ -246,6 +250,54 @@ export const Chatbot = () => {
setIsPopupOpen(true);
};

/**
 * Scrape the current page's Jenkins console output.
 *
 * Reads the standard `pre.console-output` element and returns its text,
 * keeping only the last 5000 characters of oversized logs (the failure
 * is at the end, and the full console would overload the LLM).
 * Returns an empty string when no console output is present.
 */
const getConsoleLogContext = (): string => {
  const output = document.querySelector("pre.console-output");
  const fullLog = output?.textContent ?? "";

  if (!fullLog) {
    return "";
  }

  // Truncate from the front so the tail (where the error lives) survives.
  const maxLength = 5000;
  return fullLog.length > maxLength
    ? "...(logs truncated due to size)...\n" + fullLog.slice(-maxLength)
    : fullLog;
};

/**
* Handlers for Proactive Toast
*/
/**
 * Accept the proactive toast: open the chat window and pre-fill the
 * input with the scraped console log, or with a fallback request when
 * the log could not be read. The user still sends the message manually.
 */
const handleToastConfirm = () => {
  setShowToast(false);
  setIsOpen(true);

  const logs = getConsoleLogContext();

  // The "Here are the last 5000 characters of the log:" phrasing is
  // matched by the backend to split logs from the question — keep it.
  const prefill = logs
    ? `I found a build failure. Here are the last 5000 characters of the log:\n\n\`\`\`\n${logs}\n\`\`\`\n\nCan you analyze this error?`
    : "I noticed a build failure, but I couldn't read the logs automatically. Can you paste them?";

  setInput(prefill);

  // Optional: If you want to send it immediately without clicking the arrow button:
  // sendMessage(prefill);
};

// Dismiss the proactive toast without opening the chat window.
const handleToastDismiss = () => setShowToast(false);

const getWelcomePage = () => {
return (
<div style={chatbotStyles.containerWelcomePage}>
Expand Down Expand Up @@ -301,6 +353,12 @@ export const Chatbot = () => {
>
{getChatbotText("toggleButtonLabel")}
</button>
{showToast && !isOpen && (
<ProactiveToast
onConfirm={handleToastConfirm}
onDismiss={handleToastDismiss}
/>
)}

{isOpen && (
<div
Expand Down
33 changes: 33 additions & 0 deletions frontend/src/components/Toast.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
import { chatbotStyles } from "../styles/styles";

/** Props for the proactive build-failure toast. */
interface ProactiveToastProps {
  /** Invoked when the user accepts log analysis ("Yes, analyze"). */
  onConfirm: () => void;
  /** Invoked when the user dismisses the toast ("No"). */
  onDismiss: () => void;
}

/**
 * Small fixed-position notification offering to analyze a detected
 * build failure in the chatbot. Purely presentational: the decision
 * handlers are supplied by the parent via props.
 */
export const ProactiveToast = (props: ProactiveToastProps) => {
  const { onConfirm, onDismiss } = props;

  return (
    <div style={chatbotStyles.toastContainer}>
      <div style={chatbotStyles.toastHeader}>
        <span>🤖 Jenkins Assistant</span>
      </div>
      <div style={chatbotStyles.toastContent}>
        I detected a build failure. Would you like me to analyze the logs for you?
      </div>
      <div style={chatbotStyles.toastActions}>
        <button style={chatbotStyles.toastCancelButton} onClick={onDismiss}>
          No
        </button>
        <button style={chatbotStyles.toastConfirmButton} onClick={onConfirm}>
          Yes, analyze
        </button>
      </div>
    </div>
  );
};
59 changes: 59 additions & 0 deletions frontend/src/styles/styles.ts
Original file line number Diff line number Diff line change
Expand Up @@ -499,4 +499,63 @@ export const chatbotStyles = {
cursor: "pointer",
transition: "background 0.2s, border-left 0.2s",
}) as CSSProperties,

// Toast Notification — styles for the proactive build-failure toast
// (see ProactiveToast component).

// Fixed card anchored above the chatbot toggle button, bottom-right.
// NOTE(review): "animation: fadeIn ..." relies on a global @keyframes
// fadeIn rule — inline styles cannot declare keyframes, so confirm one
// exists in a stylesheet or the animation is silently ignored.
toastContainer: {
  position: "fixed",
  bottom: "7rem",
  right: "2rem",
  width: "300px",
  backgroundColor: "var(--card-background)",
  border: "1px solid var(--border-color)",
  borderRadius: "8px",
  boxShadow: "0 4px 12px rgba(0,0,0,0.15)",
  padding: "1rem",
  zIndex: 1000,
  display: "flex",
  flexDirection: "column",
  gap: "0.5rem",
  animation: "fadeIn 0.3s ease-in-out",
} as CSSProperties,

// Bold title row ("🤖 Jenkins Assistant").
toastHeader: {
  display: "flex",
  alignItems: "center",
  gap: "0.5rem",
  fontWeight: "bold",
  fontSize: "0.9rem",
  color: "var(--text-color)",
} as CSSProperties,

// Body text of the toast message.
toastContent: {
  fontSize: "0.85rem",
  color: "var(--text-color-secondary)",
  marginBottom: "0.5rem",
} as CSSProperties,

// Right-aligned row holding the two action buttons.
toastActions: {
  display: "flex",
  justifyContent: "flex-end",
  gap: "0.5rem",
} as CSSProperties,

// Primary action ("Yes, analyze") — filled blue button.
toastConfirmButton: {
  backgroundColor: "#0073e6",
  color: "white",
  border: "none",
  borderRadius: "4px",
  padding: "4px 12px",
  fontSize: "0.85rem",
  cursor: "pointer",
} as CSSProperties,

// Secondary action ("No") — transparent, outlined button.
toastCancelButton: {
  backgroundColor: "transparent",
  color: "var(--text-color)",
  border: "1px solid var(--border-color)",
  borderRadius: "4px",
  padding: "4px 12px",
  fontSize: "0.85rem",
  cursor: "pointer",
} as CSSProperties,
};
Loading