|
| 1 | +import React, { useState } from 'react'; |
| 2 | +import { useAsync } from 'react-use'; |
| 3 | + |
| 4 | +import { llms } from '@grafana/experimental'; |
| 5 | +import { Button, Input, Spinner } from '@grafana/ui'; |
| 6 | +import { SceneComponentProps, SceneObjectBase, SceneObjectState, SceneTimeRangeCompare, sceneGraph } from '@grafana/scenes'; |
| 7 | +import { CustomSceneObject } from 'pages/Home/CustomSceneObject'; |
| 8 | + |
| 9 | +interface CustomObjectState extends SceneObjectState { |
| 10 | + llmResponse: string; |
| 11 | +} |
| 12 | + |
| 13 | +export class AdvancedLLMIntegration extends SceneObjectBase<CustomObjectState> { |
| 14 | + static Component = CustomObjectRenderer; |
| 15 | + |
| 16 | + constructor(state: CustomObjectState) { |
| 17 | + super({ ...state }); |
| 18 | + } |
| 19 | + |
| 20 | + applyCommand = (command: string, customObject: CustomSceneObject, sceneTimeRangeCompare: SceneTimeRangeCompare) => { |
| 21 | + if (!isNaN(parseInt(command, 10))) { |
| 22 | + customObject.setState({ counter: parseInt(command, 10)}); |
| 23 | + } |
| 24 | + |
| 25 | + switch (command) { |
| 26 | + case 'compare': |
| 27 | + sceneTimeRangeCompare.setState({ compareWith: '1w' }); |
| 28 | + break; |
| 29 | + default: |
| 30 | + break; |
| 31 | + } |
| 32 | + } |
| 33 | +} |
| 34 | + |
| 35 | +function CustomObjectRenderer({ model }: SceneComponentProps<AdvancedLLMIntegration>) { |
| 36 | + const data = sceneGraph.getData(model).useState(); |
| 37 | + |
| 38 | + |
| 39 | + |
| 40 | + // The current input value. |
| 41 | + const [input, setInput] = useState(''); |
| 42 | + // The final message to send to the LLM, updated when the button is clicked. |
| 43 | + const [message, setMessage] = useState(''); |
| 44 | + // The latest reply from the LLM. |
| 45 | + const [reply, setReply] = useState(''); |
| 46 | + |
| 47 | + const SYSTEM_PROMPT = ` |
| 48 | + Based on the user query provide a set response based on the following mappings: ${JSON.stringify(SYSTEM_PROMPT_ACTION_MAPPINGS)} |
| 49 | +
|
| 50 | + If you are unable to determine the user's intent try your best to provide a response based on the context of the scene with a max of 100 characters. If you are still not confident or if the question is not related just say "I'm not sure how to assist with that". If the user is asking to compare and contrast data only return the word "compare". |
| 51 | +
|
| 52 | + Only return a single word or number based on the user input, unless the command is "explain" in which case return a 250 character max response that also answers why it is important to the user using the following context of the embedded scene: ${JSON.stringify(data.data?.timeRange)} ${JSON.stringify(data.data?.series.slice(0, 10))} |
| 53 | + ` |
| 54 | + |
| 55 | + const { loading, error, value } = useAsync(async () => { |
| 56 | + // Check if the LLM plugin is enabled and configured. |
| 57 | + // If not, we won't be able to make requests, so return early. |
| 58 | + const openAIHealthDetails = await llms.openai.enabled(); |
| 59 | + const enabled = openAIHealthDetails; |
| 60 | + if (!enabled) { |
| 61 | + return { enabled }; |
| 62 | + } |
| 63 | + if (message === '') { |
| 64 | + return { enabled }; |
| 65 | + } |
| 66 | + |
| 67 | + // Make a single request to the LLM. |
| 68 | + const response = await llms.openai.chatCompletions({ |
| 69 | + model: 'gpt-3.5-turbo', |
| 70 | + messages: [ |
| 71 | + { role: 'system', content: SYSTEM_PROMPT }, |
| 72 | + { role: 'user', content: message }, |
| 73 | + ], |
| 74 | + }); |
| 75 | + setReply(response.choices[0].message.content); |
| 76 | + model.setState({ llmResponse: response.choices[0].message.content }); |
| 77 | + return { enabled, response }; |
| 78 | + }, [message]); |
| 79 | + |
| 80 | + if (error) { |
| 81 | + // TODO: handle errors. |
| 82 | + return null; |
| 83 | + } |
| 84 | + |
| 85 | + return ( |
| 86 | + <div> |
| 87 | + {value?.enabled ? ( |
| 88 | + <> |
| 89 | + <Input |
| 90 | + style={{ padding: '10px' }} |
| 91 | + value={input} |
| 92 | + onChange={(e) => setInput(e.currentTarget.value)} |
| 93 | + placeholder="Enter a message" |
| 94 | + /> |
| 95 | + <br /> |
| 96 | + <Button |
| 97 | + type="submit" |
| 98 | + onClick={() => {setMessage(input)}} |
| 99 | + > |
| 100 | + Submit Request |
| 101 | + </Button> |
| 102 | + <br /> |
| 103 | + <div style={{ padding: '10px' }}> |
| 104 | + {loading ? <Spinner /> : reply} |
| 105 | + </div> |
| 106 | + </> |
| 107 | + ) : ( |
| 108 | + <div>LLM plugin not enabled.</div> |
| 109 | + )} |
| 110 | + </div> |
| 111 | + ); |
| 112 | +} |
| 113 | + |
// DEMO QUESTIONS
// Show me 20 servers
// Explain the data being displayed
// Compare and contrast this server's data with last week's data

// WARNING: quick demo hack — these prompt/action mappings are brittle; revisit before any production use.
| 120 | +const SYSTEM_PROMPT_ACTION_MAPPINGS = { |
| 121 | + explain: 'the user wants the data being displayed explained, use any context provided, and return 250 character max response, do not make anything up', |
| 122 | + showNumberOfServers: 'return number of servers to show, only return the number the user suggested', |
| 123 | + compare: 'if user mentions compare and contrast data, return just the word "compare"', |
| 124 | +} |