Commit 9020fd5

Add LLM scene example
1 parent ed669e0 commit 9020fd5

8 files changed (+451, -0)

examples/app-with-scenes/src/components/Routes/Routes.tsx (+2)

```diff
@@ -2,6 +2,7 @@ import React from 'react';
 import { Redirect, Route, Switch } from 'react-router-dom';
 import { HomePage } from '../../pages/Home';
 import { PageWithTabs } from '../../pages/WithTabs';
+import { WithLLM } from '../../pages/WithLLM';
 import { WithDrilldown } from '../../pages/WithDrilldown';
 import { prefixRoute } from '../../utils/utils.routing';
 import { ROUTES } from '../../constants';
@@ -12,6 +13,7 @@ export const Routes = () => {
     <Switch>
       <Route path={prefixRoute(`${ROUTES.WithTabs}`)} component={PageWithTabs} />
       <Route path={prefixRoute(`${ROUTES.WithDrilldown}`)} component={WithDrilldown} />
+      <Route path={prefixRoute(`${ROUTES.WithLLM}`)} component={WithLLM} />
       <Route path={prefixRoute(`${ROUTES.Home}`)} component={HomePage} />
       <Route path={prefixRoute(`${ROUTES.HelloWorld}`)} component={HelloWorldPluginPage} />
       <Redirect to={prefixRoute(ROUTES.Home)} />
```
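All of these routes go through `prefixRoute`, which is not part of this commit. In the create-plugin example apps it simply prepends the plugin's base URL; a sketch of the assumed template helper, not code from this commit:

```ts
// Sketch of the assumed utils/utils.routing helper from the plugin template.
import { PLUGIN_BASE_URL } from '../constants';

// Prefixes the route with the plugin's base URL, e.g. '/a/<plugin-id>/home'.
export function prefixRoute(route: string): string {
  return `${PLUGIN_BASE_URL}/${route}`;
}
```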

examples/app-with-scenes/src/constants.ts (+1)

```diff
@@ -6,6 +6,7 @@ export enum ROUTES {
   Home = 'home',
   WithTabs = 'page-with-tabs',
   WithDrilldown = 'page-with-drilldown',
+  WithLLM = 'page-with-llm',
   HelloWorld = 'hello-world',
 }
```

examples/app-with-scenes/src/pages/WithLLM/WithLLM.tsx (new file, +36)

```tsx
import React, { useMemo } from "react";
import { SceneApp, SceneAppPage } from "@grafana/scenes";
import { prefixRoute } from "utils/utils.routing";
import { ROUTES } from "../../constants";
import { getAdvancedLLMIntegrationScene, getBasicLLMIntegrationScene } from "./scenes";

const getLLMAppScene = () => {
  return new SceneApp({
    pages: [
      new SceneAppPage({
        title: 'Page with LLM integration',
        subTitle: 'This page showcases basic LLM integration within a scenes app',
        url: prefixRoute(`${ROUTES.WithLLM}`),
        getScene: getBasicLLMIntegrationScene,
        tabs: [
          new SceneAppPage({
            title: 'Basic LLM Integration',
            url: prefixRoute(`${ROUTES.WithLLM}/basic`),
            getScene: getBasicLLMIntegrationScene,
          }),
          new SceneAppPage({
            title: 'Advanced LLM Integration',
            url: prefixRoute(`${ROUTES.WithLLM}/advanced`),
            getScene: getAdvancedLLMIntegrationScene,
          }),
        ],
      }),
    ],
  });
};

export const WithLLM = () => {
  const scene = useMemo(() => getLLMAppScene(), []);

  return <scene.Component model={scene} />;
};
```
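The extraction dropped the path headers for the new files in this directory; the paths shown here are inferred from the imports and the `index.ts` re-export below. The `./scenes` module imported above is among the commit's eight files but is not shown in this excerpt. A minimal sketch of what `getBasicLLMIntegrationScene` might look like, assuming the usual `EmbeddedScene` layout and `SceneReactObject` wrapper from `@grafana/scenes`; the real version presumably also attaches a `SceneQueryRunner`, since the advanced renderer calls `sceneGraph.getData(model)`:

```tsx
// Hypothetical sketch of the missing scenes.ts — the layout and names are
// assumptions based on the imports above, not the commit's actual code.
import React from 'react';
import { EmbeddedScene, SceneFlexItem, SceneFlexLayout, SceneReactObject } from '@grafana/scenes';
import { BasicLLMIntegration } from './BasicLLMIntegration';

export function getBasicLLMIntegrationScene() {
  return new EmbeddedScene({
    body: new SceneFlexLayout({
      children: [
        new SceneFlexItem({
          // SceneReactObject lets a plain React component participate in a
          // scene layout.
          body: new SceneReactObject({ reactNode: <BasicLLMIntegration /> }),
        }),
      ],
    }),
  });
}
```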
examples/app-with-scenes/src/pages/WithLLM/AdvancedLLMIntegration.tsx (new file, +124)

```tsx
import React, { useState } from 'react';
import { useAsync } from 'react-use';

import { llms } from '@grafana/experimental';
import { Button, Input, Spinner } from '@grafana/ui';
import {
  SceneComponentProps,
  SceneObjectBase,
  SceneObjectState,
  SceneTimeRangeCompare,
  sceneGraph,
} from '@grafana/scenes';
import { CustomSceneObject } from 'pages/Home/CustomSceneObject';

interface CustomObjectState extends SceneObjectState {
  llmResponse: string;
}

export class AdvancedLLMIntegration extends SceneObjectBase<CustomObjectState> {
  static Component = CustomObjectRenderer;

  constructor(state: CustomObjectState) {
    super({ ...state });
  }

  applyCommand = (command: string, customObject: CustomSceneObject, sceneTimeRangeCompare: SceneTimeRangeCompare) => {
    if (!isNaN(parseInt(command, 10))) {
      customObject.setState({ counter: parseInt(command, 10) });
    }

    switch (command) {
      case 'compare':
        sceneTimeRangeCompare.setState({ compareWith: '1w' });
        break;
      default:
        break;
    }
  };
}

function CustomObjectRenderer({ model }: SceneComponentProps<AdvancedLLMIntegration>) {
  const data = sceneGraph.getData(model).useState();

  // The current input value.
  const [input, setInput] = useState('');
  // The final message to send to the LLM, updated when the button is clicked.
  const [message, setMessage] = useState('');
  // The latest reply from the LLM.
  const [reply, setReply] = useState('');

  const SYSTEM_PROMPT = `
  Based on the user query, provide a set response based on the following mappings: ${JSON.stringify(SYSTEM_PROMPT_ACTION_MAPPINGS)}

  If you are unable to determine the user's intent, try your best to provide a response based on the context of the scene, with a max of 100 characters. If you are still not confident, or if the question is not related, just say "I'm not sure how to assist with that". If the user is asking to compare and contrast data, only return the word "compare".

  Only return a single word or number based on the user input, unless the command is "explain", in which case return a 250 character max response that also answers why it is important to the user, using the following context of the embedded scene: ${JSON.stringify(data.data?.timeRange)} ${JSON.stringify(data.data?.series.slice(0, 10))}
  `;

  const { loading, error, value } = useAsync(async () => {
    // Check if the LLM plugin is enabled and configured.
    // If not, we won't be able to make requests, so return early.
    const enabled = await llms.openai.enabled();
    if (!enabled) {
      return { enabled };
    }
    if (message === '') {
      return { enabled };
    }

    // Make a single request to the LLM.
    const response = await llms.openai.chatCompletions({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: SYSTEM_PROMPT },
        { role: 'user', content: message },
      ],
    });
    setReply(response.choices[0].message.content);
    model.setState({ llmResponse: response.choices[0].message.content });
    return { enabled, response };
  }, [message]);

  if (error) {
    // TODO: handle errors.
    return null;
  }

  return (
    <div>
      {value?.enabled ? (
        <>
          <Input
            style={{ padding: '10px' }}
            value={input}
            onChange={(e) => setInput(e.currentTarget.value)}
            placeholder="Enter a message"
          />
          <br />
          <Button type="submit" onClick={() => setMessage(input)}>
            Submit Request
          </Button>
          <br />
          <div style={{ padding: '10px' }}>{loading ? <Spinner /> : reply}</div>
        </>
      ) : (
        <div>LLM plugin not enabled.</div>
      )}
    </div>
  );
}

// DEMO QUESTIONS
// Show me 20 servers
// Explain the data being displayed
// Compare and contrast this server's data with last week's data

// WARNING: super hacky, do not show during demo :s
const SYSTEM_PROMPT_ACTION_MAPPINGS = {
  explain:
    'the user wants the data being displayed explained, use any context provided, and return 250 character max response, do not make anything up',
  showNumberOfServers: 'return number of servers to show, only return the number the user suggested',
  compare: 'if user mentions compare and contrast data, return just the word "compare"',
};
```
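Nothing in this file actually invokes `applyCommand`; presumably the advanced scene (in the `scenes.ts` not shown here) forwards each stored `llmResponse` into it. A hypothetical sketch of that wiring, assuming `CustomSceneObject` takes its `{ counter }` state directly and that the file name above is correct:

```tsx
// Hypothetical wiring, not part of the commit: construct the two objects
// that applyCommand mutates, then forward each stored LLM reply into it.
import { SceneTimeRangeCompare } from '@grafana/scenes';
import { CustomSceneObject } from 'pages/Home/CustomSceneObject';
import { AdvancedLLMIntegration } from './AdvancedLLMIntegration';

const customObject = new CustomSceneObject({ counter: 5 });
const timeCompare = new SceneTimeRangeCompare({});
const advanced = new AdvancedLLMIntegration({ llmResponse: '' });

// A bare number in the reply updates the server counter; the word
// "compare" switches on the one-week time comparison.
advanced.subscribeToState((state) => {
  advanced.applyCommand(state.llmResponse, customObject, timeCompare);
});
```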
examples/app-with-scenes/src/pages/WithLLM/BasicLLMIntegration.tsx (new file, +121)

```tsx
import React, { useState } from 'react';
import { useAsync } from 'react-use';

import { llms } from '@grafana/experimental';
import { Button, Input, Spinner } from '@grafana/ui';
import { finalize } from 'rxjs';

export function BasicLLMIntegration() {
  // The current input value.
  const [input, setInput] = useState('');
  // The final message to send to the LLM, updated when the button is clicked.
  const [message, setMessage] = useState('');
  // The latest reply from the LLM.
  const [reply, setReply] = useState('');

  const [useStream, setUseStream] = useState(false);

  const [started, setStarted] = useState(false);
  const [finished, setFinished] = useState(true);

  const { loading, error, value } = useAsync(async () => {
    // Check if the LLM plugin is enabled and configured.
    // If not, we won't be able to make requests, so return early.
    const enabled = await llms.openai.enabled();
    if (!enabled) {
      return { enabled };
    }
    if (message === '') {
      return { enabled };
    }

    setStarted(true);
    setFinished(false);
    if (!useStream) {
      // Make a single request to the LLM.
      const response = await llms.openai.chatCompletions({
        model: 'gpt-3.5-turbo',
        messages: [
          { role: 'system', content: 'You are a cynical assistant.' },
          { role: 'user', content: message },
        ],
      });
      setReply(response.choices[0].message.content);
      setStarted(false);
      setFinished(true);
      return { enabled, response };
    } else {
      // Stream the completions. Each element is the next stream chunk.
      const stream = llms.openai
        .streamChatCompletions({
          model: 'gpt-3.5-turbo',
          messages: [
            { role: 'system', content: 'You are a cynical assistant.' },
            { role: 'user', content: message },
          ],
        })
        .pipe(
          // Accumulate the stream content into a stream of strings, where each
          // element contains the accumulated message so far.
          llms.openai.accumulateContent(),
          // The stream is just a regular Observable, so we can use standard rxjs
          // functionality to update state, e.g. recording when the stream
          // has completed.
          finalize(() => {
            setStarted(false);
            setFinished(true);
          })
        );
      // Subscribe to the stream and update the state for each returned value.
      return {
        enabled,
        stream: stream.subscribe(setReply),
      };
    }
  }, [message]);

  if (error) {
    // TODO: handle errors.
    return null;
  }

  return (
    <div>
      {value?.enabled ? (
        <>
          <Input
            style={{ padding: '10px' }}
            value={input}
            onChange={(e) => setInput(e.currentTarget.value)}
            placeholder="Enter a message"
          />
          <br />
          <Button
            style={{ marginRight: '10px' }}
            type="submit"
            onClick={() => {
              setMessage(input);
              setUseStream(true);
            }}
          >
            Submit Stream
          </Button>
          <Button
            type="submit"
            onClick={() => {
              setMessage(input);
              setUseStream(false);
            }}
          >
            Submit Request
          </Button>
          <br />
          <div style={{ padding: '10px' }}>{loading ? <Spinner /> : reply}</div>
          <div style={{ padding: '10px' }}>{started ? 'Response is started' : 'Response is not started'}</div>
          <div style={{ padding: '10px' }}>{finished ? 'Response is finished' : 'Response is not finished'}</div>
        </>
      ) : (
        <div>LLM plugin not enabled.</div>
      )}
    </div>
  );
}
```
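Two caveats in the streaming branch above: the `useAsync` callback only re-runs when `message` changes, so submitting the same text in the other mode does nothing, and the returned subscription is never unsubscribed. A standalone sketch of the same streaming call with explicit cleanup; the component and prop names here are illustrative, not part of the commit:

```tsx
import React, { useEffect, useState } from 'react';
import { llms } from '@grafana/experimental';

// Illustrative sketch: stream a reply for each new message and tear the
// subscription down when it is superseded.
export function StreamingReply({ message }: { message: string }) {
  const [reply, setReply] = useState('');

  useEffect(() => {
    if (message === '') {
      return;
    }
    const subscription = llms.openai
      .streamChatCompletions({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: message }],
      })
      // Each emission is the accumulated reply so far.
      .pipe(llms.openai.accumulateContent())
      .subscribe(setReply);
    // Unsubscribe on unmount or when a new message replaces this one.
    return () => subscription.unsubscribe();
  }, [message]);

  return <div>{reply}</div>;
}
```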
examples/app-with-scenes/src/pages/WithLLM/index.ts (new file, +1)

```ts
export * from './WithLLM';
```
