This folder contains the code for the live coding session at SymfonyCon 2024 in Vienna.
Create a new Symfony project using the Symfony skeleton:

```bash
composer create-project symfony/skeleton symfony-con-2024
cd symfony-con-2024
```

Install the dependencies for the project:
```bash
# 1. Install the Modelflow AI Symfony bundle and the chat package
composer require modelflow-ai/symfony-bundle modelflow-ai/chat

# 2. Install the Modelflow AI adapters: here OpenAI and Ollama
composer require modelflow-ai/openai-adapter modelflow-ai/ollama-adapter

# 3. Install the Symfony Maker Bundle (dev dependency)
composer require symfony/maker-bundle --dev
```

As there is currently no Symfony Flex recipe for the Modelflow AI bundle, you need to configure the bundle manually:
```php
<?php
// config/bundles.php

return [
    // ...
    ModelflowAi\Integration\Symfony\ModelflowAiBundle::class => ['all' => true],
];
```

```yaml
# config/packages/modelflow_ai.yaml
modelflow_ai:
    providers:
        openai:
            enabled: true
            credentials:
                api_key: '%env(OPENAI_API_KEY)%'
    adapters:
        gpt4o:
            enabled: true
```
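The provider configuration reads the API key from the `OPENAI_API_KEY` environment variable, so it has to be defined somewhere Symfony picks it up, for example in `.env.local` (the value below is a placeholder):

```bash
# .env.local (not committed to version control)
OPENAI_API_KEY=sk-...  # placeholder, use your own key
```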
Create a new command to interact with the chatbot:

```bash
bin/console make:command app:chat
```

Add the service `ModelflowAi\Chat\AIChatRequestHandlerInterface` to the constructor of the command:
```php
public function __construct(
    private AIChatRequestHandlerInterface $chatRequestHandler,
) {
    parent::__construct();
}
```
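For orientation, here is a sketch of the surrounding class as the Maker Bundle generates it, together with the `use` statements the following snippets rely on. The class name `ChatCommand` is an example (it depends on what the maker generated), and the namespaces of the message classes are assumptions to verify against the installed `modelflow-ai/chat` package:

```php
use ModelflowAi\Chat\AIChatRequestHandlerInterface;
// Namespaces below are assumed; check the modelflow-ai/chat package:
use ModelflowAi\Chat\Request\Message\AIChatMessage;
use ModelflowAi\Chat\Request\Message\AIChatMessageRoleEnum;
use Symfony\Component\Console\Attribute\AsCommand;
use Symfony\Component\Console\Command\Command;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Output\OutputInterface;
use Symfony\Component\Console\Style\SymfonyStyle;

#[AsCommand(name: 'app:chat')]
class ChatCommand extends Command
{
    // the constructor above and the execute() method below go here
}
```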
Implement the command logic:

```php
protected function execute(InputInterface $input, OutputInterface $output): int
{
    $io = new SymfonyStyle($input, $output);

    // Ask the user for a question and send it to the model
    $question = $io->ask('You');

    $response = $this->chatRequestHandler
        ->createRequest()
        ->addUserMessage($question)
        ->build()
        ->execute();

    $io->success($response->getMessage()->content);

    return Command::SUCCESS;
}
```

Run the command and ask the chatbot a question:
```bash
bin/console app:chat
```

Add a messages array to the execute method:
```php
protected function execute(InputInterface $input, OutputInterface $output): int
{
    $io = new SymfonyStyle($input, $output);

    $messages = [];
    while (true) {
        $question = $io->ask('You');
        if ('exit' === $question) {
            break;
        }

        $response = $this->chatRequestHandler
            ->createRequest(...$messages)
            ->addUserMessage($question)
            ->build()
            ->execute();

        $io->success($response->getMessage()->content);

        // Carry the full history over to the next iteration,
        // including the assistant's latest answer
        $messages = $response->getRequest()->getMessages();
        $messages[] = new AIChatMessage(AIChatMessageRoleEnum::ASSISTANT, $response->getMessage()->content);
    }

    return Command::SUCCESS;
}
```

This will allow the chatbot to remember the conversation history and provide more contextually relevant responses.
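After one loop iteration the history passed into `createRequest()` looks roughly like this (an illustrative sketch; the `USER` case is assumed to mirror the `ASSISTANT` case used above):

```php
$messages = [
    new AIChatMessage(AIChatMessageRoleEnum::USER, 'What is Modelflow AI?'),
    new AIChatMessage(AIChatMessageRoleEnum::ASSISTANT, '...'),
    // each further round appends one USER and one ASSISTANT message
];
```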
To stream responses, add the streamed flag to the request and adjust how the content is displayed:
```php
protected function execute(InputInterface $input, OutputInterface $output): int
{
    $io = new SymfonyStyle($input, $output);

    $messages = [];
    while (true) {
        $question = $io->ask('You');
        if ('exit' === $question) {
            break;
        }

        /** @var AIChatResponseStream $response */
        $response = $this->chatRequestHandler
            ->createRequest(...$messages)
            ->addUserMessage($question)
            ->streamed()
            ->build()
            ->execute();

        // Print the chunks as they arrive instead of waiting for the full answer
        foreach ($response->getMessageStream() as $message) {
            $io->write($message->content);
        }
        $io->newLine(2);

        $messages = $response->getRequest()->getMessages();
        $messages[] = new AIChatMessage(AIChatMessageRoleEnum::ASSISTANT, $response->getMessage()->content);
    }

    return Command::SUCCESS;
}
```

Add the Ollama adapter to the configuration:
```yaml
# config/packages/modelflow_ai.yaml
modelflow_ai:
    providers:
        openai:
            enabled: true
            credentials:
                api_key: '%env(OPENAI_API_KEY)%'
        ollama:
            enabled: true
    adapters:
        gpt4o:
            enabled: true
        llama3_2:
            enabled: true
```
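The `llama3_2` adapter expects a local Ollama installation (https://ollama.com) serving the model. Pulling it once up front avoids a long download on the first request; the model tag below is an assumption matching the adapter name:

```bash
ollama pull llama3.2
```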
Use the Ollama adapter in the command:

```php
/** @var AIChatResponseStream $response */
$response = $this->chatRequestHandler
    ->createRequest(...$messages)
    ->addUserMessage($question)
    ->addCriteria(ModelCriteria::LLAMA3_2)
    ->streamed()
    ->build()
    ->execute();
```
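With the `LLAMA3_2` criteria attached, the request handler routes the chat to the local Llama model instead of OpenAI. Run the command again to verify:

```bash
bin/console app:chat
```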