diff --git a/examples/expert-proofreader/.env.example b/examples/expert-proofreader/.env.example
index 85d27328..a5aacd2b 100644
--- a/examples/expert-proofreader/.env.example
+++ b/examples/expert-proofreader/.env.example
@@ -1 +1 @@
-NEXT_LB_PIPE_API_KEY=""
\ No newline at end of file
+LB_PIPE_API_KEY=""
\ No newline at end of file
diff --git a/examples/expert-proofreader/README.md b/examples/expert-proofreader/README.md
index ea984ceb..436f3661 100755
--- a/examples/expert-proofreader/README.md
+++ b/examples/expert-proofreader/README.md
@@ -1,24 +1,26 @@
-
 ![Expert Proofreader Chatbot by ⌘ Langbase][cover]
 
 ![License: MIT][mit]
 
 [![Fork to ⌘ Langbase][fork]][pipe]
 
-## Build Expert Proofreader with Pipes — ⌘ Langbase
-This chatbot is built by using an AI Pipe on Langbase, it works with 30+ LLMs (OpenAI, Gemini, Mistral, Llama, Gemma, etc), any Data (10M+ context with Memory sets), and any Framework (standard web API you can use with any software).
+## Build Expert Proofreader with a Pipe — ⌘ Langbase
+
+This chatbot is built using an agentic Pipe on Langbase. It works with 30+ LLMs (OpenAI, Gemini, Mistral, Llama, Gemma, etc.), any Data (10M+ context with Memory sets), and any Framework (a standard web API you can use with any software).
+
 Check out the live demo [here][demo].
 
 ## Features
-- 💬 [Expert Proofreader Bot][demo] — Built with an [AI Pipe on ⌘ Langbase][pipe]
+
+- 💬 [Expert Proofreader Chatbot][demo] — Built with an [AI Pipe on ⌘ Langbase][pipe]
 - ⚡️ Streaming — Real-time chat experience with streamed responses
 - 🗣️ Q/A — Ask questions and get pre-defined answers with your preferred AI model and tone
 - 🔋 Responsive and open source — Works on all devices and platforms
 
 ## Learn more
-1. Check the [Expert Proofreader Bot on ⌘ Langbase][pipe]
+
+1. Check the [Expert Proofreader Chatbot on ⌘ Langbase][pipe]
 2. Read the [source code on GitHub][gh] for this example
 3. Go through the documentation: [Pipe Quick Start][qs]
 4. Learn more about [Pipes & Memory features on ⌘ Langbase][docs]
@@ -29,16 +31,15 @@ Let's get started with the project:
 To get started with Langbase, you'll need to [create a free personal account on Langbase.com][signup] and verify your email address. _Done? Cool, cool!_
 
-1. Fork the [Expert Proofreader Bot][pipe] Pipe on ⌘ Langbase.
+1. Fork the [Expert Proofreader Chatbot][pipe] Pipe on ⌘ Langbase.
 2. Go to the API tab to copy the Pipe's API key (to be used on server-side only).
 3. Download the example project folder from [here][download] or clone the repository.
 4. `cd` into the project directory and open it in your code editor.
 5. Duplicate the `.env.example` file in this project and rename it to `.env.local`.
 6. Add the following environment variables (.env.local):
-
    ```
    # Replace `PIPE_API_KEY` with the copied API key.
-   NEXT_LB_PIPE_API_KEY="PIPE_API_KEY"
+   LB_PIPE_API_KEY="PIPE_API_KEY"
    ```
 7. In your CLI, issue the following:
    ```
@@ -64,8 +65,7 @@ This project is created by [Langbase][lb] team members, with contributions from:
 
 **_Built by ⌘ [Langbase.com][lb] — Ship hyper-personalized AI assistants with memory!_**
 
-
-[cover]:https://raw.githubusercontent.com/LangbaseInc/docs-images/main/examples/expert-proofreader/expert-proofreader.png
+[cover]:https://raw.githubusercontent.com/LangbaseInc/docs-images/main/examples/expert-proofreader/expert-proofreader-chatbot.png
 [demo]: https://expert-proofreader.langbase.dev
 [lb]: https://langbase.com
 [pipe]: https://langbase.com/examples/expert-proofreader
diff --git a/examples/expert-proofreader/app/api/chat/route.ts b/examples/expert-proofreader/app/api/chat/route.ts
index 95af6ed7..7987954b 100755
--- a/examples/expert-proofreader/app/api/chat/route.ts
+++ b/examples/expert-proofreader/app/api/chat/route.ts
@@ -10,9 +10,9 @@ export const runtime = 'edge'
  */
 export async function POST(req: Request) {
 	try {
-		if (!process.env.NEXT_LB_PIPE_API_KEY) {
+		if (!process.env.LB_PIPE_API_KEY) {
 			throw new Error(
-				'Please set NEXT_LB_PIPE_API_KEY in your environment variables.'
+				'Please set LB_PIPE_API_KEY in your environment variables.'
 			)
 		}
 
@@ -20,7 +20,7 @@ export async function POST(req: Request) {
 
 		const headers = {
 			'Content-Type': 'application/json',
-			Authorization: `Bearer ${process.env.NEXT_LB_PIPE_API_KEY}`
+			Authorization: `Bearer ${process.env.LB_PIPE_API_KEY}`
 		}
 
 		// Get chat prompt messages and threadId from the client.
diff --git a/examples/expert-proofreader/components/chatbot-page.tsx b/examples/expert-proofreader/components/chatbot-page.tsx
index 80203378..e673c6dc 100755
--- a/examples/expert-proofreader/components/chatbot-page.tsx
+++ b/examples/expert-proofreader/components/chatbot-page.tsx
@@ -7,6 +7,7 @@ import { useState } from 'react'
 import { toast } from 'sonner'
 import { ChatInput } from './chat-input'
 import { Opening } from './opening'
+import { Suggestions } from './suggestions'
 
 export interface ChatProps extends React.ComponentProps<'div'> {
 	id?: string // Optional: Thread ID if you want to persist the chat in a DB
@@ -31,6 +32,11 @@ export function Chatbot({ id, initialMessages, className }: ChatProps) {
 			setThreadId(lbThreadId)
 		}
 	})
+
+	const sendSuggestedPrompt = (prompt: string) => {
+		setInput(prompt)
+	}
+
 	return (
@@ -39,7 +45,10 @@ export function Chatbot({ id, initialMessages, className }: ChatProps) {
 				) : (
-					<Opening />
+					<>
+						<Opening />
+						<Suggestions sendSuggestedPrompt={sendSuggestedPrompt} />
+					</>
 				)}
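For reviewers tracing the new flow: `Suggestions` never talks to the API itself; it only calls `sendSuggestedPrompt`, which pre-fills the chat input via `setInput`, and the user still submits the form as before. A minimal sketch of that wiring, assuming the `useChat` hook from `ai/react`; the hook options and component paths here are illustrative, not the exact `chatbot-page.tsx` code:

```tsx
'use client'

// Illustrative wiring only; the real page lives in components/chatbot-page.tsx.
import { useChat } from 'ai/react'
import { Suggestions } from './suggestions'

export function ChatbotSketch() {
	// useChat exposes setInput, which is all a suggestion needs to touch.
	const { messages, input, setInput, handleInputChange, handleSubmit } = useChat({
		api: '/api/chat' // the edge route in this example
	})

	// Clicking a suggestion only pre-fills the textarea; sending stays manual.
	const sendSuggestedPrompt = (prompt: string) => setInput(prompt)

	return (
		<div>
			{messages.length === 0 && (
				<Suggestions sendSuggestedPrompt={sendSuggestedPrompt} />
			)}
			<form onSubmit={handleSubmit}>
				<textarea value={input} onChange={handleInputChange} />
				<button type="submit">Send</button>
			</form>
		</div>
	)
}
```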

-					Expert Proofreader Bot by a
+					Expert Proofreader Chatbot by a
 				1.
-						Fork this Expert Proofreader Bot Pipe on ⌘ Langbase
+						Fork this Expert Proofreader Chatbot Pipe on ⌘ Langbase
 				2.
diff --git a/examples/expert-proofreader/components/prompt-form.tsx b/examples/expert-proofreader/components/prompt-form.tsx
index 49b6cb77..cb14302d 100755
--- a/examples/expert-proofreader/components/prompt-form.tsx
+++ b/examples/expert-proofreader/components/prompt-form.tsx
@@ -4,6 +4,7 @@ import { useEnterSubmit } from '@/lib/hooks/use-enter-submit'
 import { UseChatHelpers } from 'ai/react'
 import * as React from 'react'
 import Textarea from 'react-textarea-autosize'
+import { HoverCard, HoverCardTrigger, HoverCardContent } from '@/components/ui/hovercard'
 
 export interface PromptProps extends Pick<UseChatHelpers, …> {
 
@@ -50,6 +51,17 @@ export function PromptForm({
 					aria-hidden="true"
 				/>

 					Chat
+				<HoverCard>
+					<HoverCardTrigger>
+						…
+					</HoverCardTrigger>
+					<HoverCardContent>
+						• Say Hello to start a conversation, or simply enter the text you want to be proofread by the Expert Proofreader chatbot.
+						• For more nuanced proofreading, you can choose state-of-the-art LLMs such as Claude 3.5, GPT-4 Turbo, or GPT-4o. With the Langbase agentic pipeline, this can be done easily with just a few clicks.
+					</HoverCardContent>
+				</HoverCard>
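For context on the hover hint added to the prompt form: the imported `HoverCard`, `HoverCardTrigger`, and `HoverCardContent` primitives wrap the two usage tips shown above. A rough sketch of that composition, assuming the standard Radix hover-card behavior; the trigger element, copy layout, and styling are placeholders rather than the example's exact markup:

```tsx
// Rough sketch of the proofreading-tips hover card (trigger and styling are placeholders).
import { HoverCard, HoverCardTrigger, HoverCardContent } from '@/components/ui/hovercard'

export function ProofreadingTips() {
	return (
		<HoverCard>
			{/* Radix shows the content when the trigger is hovered or focused. */}
			<HoverCardTrigger asChild>
				<button type="button" aria-label="Usage tips">?</button>
			</HoverCardTrigger>
			<HoverCardContent align="start" sideOffset={8}>
				<ul>
					<li>
						Say Hello to start a conversation, or simply enter the text you want
						to be proofread by the Expert Proofreader chatbot.
					</li>
					<li>
						For more nuanced proofreading, switch the Pipe to a state-of-the-art
						LLM such as Claude 3.5, GPT-4 Turbo, or GPT-4o; the Langbase pipe
						settings let you do this in a few clicks.
					</li>
				</ul>
			</HoverCardContent>
		</HoverCard>
	)
}
```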

diff --git a/examples/expert-proofreader/components/suggestions.tsx b/examples/expert-proofreader/components/suggestions.tsx
new file mode 100644
index 00000000..b61dab2a
--- /dev/null
+++ b/examples/expert-proofreader/components/suggestions.tsx
@@ -0,0 +1,58 @@
+import cn from 'mxcn'
+import { IconSparkles } from './ui/icons'
+
+// Prompt suggestions – change these to match your use case/company
+const suggestions = [
+	{
+		title: `Say hello to begin a conversation`,
+		prompt: `Hello`
+	},
+	{
+		title: `Demo text`,
+		prompt: `The computational demands of Large Language Models (LLMs) have escalated dramatically, commensurate with their increasing size and complexity.
+Training state-of-the-art LLMs necessitates vast arrays of high-performance GPUs, often numbering in the thousands, and can consume several megawatt-hours of electricity over periods extending to weeks or even months.
+This resource-intensive process raises pertinent questions about the models' environmental impact and the economic feasibility of their development for all but the most well-funded research institutions or technology companies.
+Moreover, the inference phase, while less demanding than training, still requires substantial computational resources, particularly for real-time applications, thereby limiting the deployment of these models in resource-constrained environments or edge devices.
+Consequently, there is a growing impetus in the field to develop more efficient architectures and training paradigms that can mitigate these computational burdens without compromising the remarkable capabilities that have made LLMs so transformative in natural language processing.`
+	}
+]
+
+export const Suggestions = ({
+	sendSuggestedPrompt
+}: {
+	sendSuggestedPrompt: (prompt: string) => void
+}) => {
+	const handleClick = (prompt: string) => {
+		sendSuggestedPrompt(prompt)
+	}
+
+	return (
+		<div>
+			<div>
+				{suggestions.map((suggestion, index) => {
+					return (
+						<div
+							key={index}
+							onClick={() => handleClick(suggestion.prompt)}
+						>
+							<IconSparkles />
+							{suggestion.title}
+						</div>
+					)
+				})}
+			</div>
+		</div>
+	)
+}
\ No newline at end of file
diff --git a/examples/expert-proofreader/components/ui/hovercard.tsx b/examples/expert-proofreader/components/ui/hovercard.tsx
new file mode 100644
index 00000000..cc819857
--- /dev/null
+++ b/examples/expert-proofreader/components/ui/hovercard.tsx
@@ -0,0 +1,29 @@
+"use client"
+
+import * as React from "react"
+import * as HoverCardPrimitive from "@radix-ui/react-hover-card"
+
+import cn from 'mxcn'
+
+const HoverCard = HoverCardPrimitive.Root
+
+const HoverCardTrigger = HoverCardPrimitive.Trigger
+
+const HoverCardContent = React.forwardRef<
+	React.ElementRef<typeof HoverCardPrimitive.Content>,
+	React.ComponentPropsWithoutRef<typeof HoverCardPrimitive.Content>
+>(({ className, align = "center", sideOffset = 4, ...props }, ref) => (
+	<HoverCardPrimitive.Content
+		ref={ref}
+		align={align}
+		sideOffset={sideOffset}
+		className={cn(className)}
+		{...props}
+	/>
+))
+HoverCardContent.displayName = HoverCardPrimitive.Content.displayName
+
+export { HoverCard, HoverCardTrigger, HoverCardContent }
\ No newline at end of file
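Whatever the naming, the renamed `LB_PIPE_API_KEY` is only ever read on the server: the edge route checks for it and attaches it as a Bearer token before forwarding the chat to the Pipe. A condensed sketch of that flow; the Langbase endpoint URL and request body shape here are assumptions for illustration, and `route.ts` above remains the source of truth:

```ts
// app/api/chat/route.ts, condensed. Endpoint URL and body shape are illustrative assumptions.
export const runtime = 'edge'

export async function POST(req: Request) {
	// Fail fast if the server-side key is missing.
	if (!process.env.LB_PIPE_API_KEY) {
		return new Response('Please set LB_PIPE_API_KEY in your environment variables.', {
			status: 500
		})
	}

	// Chat prompt messages and optional threadId come from the client.
	const { messages, threadId } = await req.json()

	// The key never reaches the browser; it is attached here, on the edge runtime.
	const res = await fetch('https://api.langbase.com/beta/chat', {
		method: 'POST',
		headers: {
			'Content-Type': 'application/json',
			Authorization: `Bearer ${process.env.LB_PIPE_API_KEY}`
		},
		body: JSON.stringify({ messages, threadId, stream: true })
	})

	// Stream the Pipe's response straight back to the client.
	return new Response(res.body, {
		headers: { 'Content-Type': 'text/event-stream' }
	})
}
```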