|
1043 | 1043 | "\n",
|
1044 | 1044 | "As you notice above, OpenAI Chat Completions API does not call the function; instead, the model generates JSON that you can use to call the function in your code. That's why, to build an end-to-end chat application, you need to check whether the OpenAI response contains `tool_calls` for every message. If so, you need to call the corresponding function with the provided arguments and send the function response back to OpenAI. Otherwise, append both the user and assistant messages to the `messages` list to have a regular conversation with the model. \n",
|
1045 | 1045 | "\n",
|
1046 |
| - "Run the code cell below and use the input box to interact with the chat application that has access to two tools you've created above. \n", |
| 1046 | + "To build a nice UI for your application, you can use [Gradio](https://www.gradio.app/) that comes with a chat interface. Install `gradio`, run the code cell below and use the input box to interact with the chat application that has access to two tools you've created above. \n", |
1047 | 1047 | "\n",
|
1048 | 1048 | "Example queries you can try:\n",
|
1049 | 1049 | "* \"***What is the capital of Sweden?***\": A basic query without any function calls\n",
|
|
1057 | 1057 | },
|
1058 | 1058 | {
|
1059 | 1059 | "cell_type": "code",
|
1060 |
| - "execution_count": 24, |
| 1060 | + "execution_count": null, |
| 1061 | + "metadata": {}, |
| 1062 | + "outputs": [], |
| 1063 | + "source": [ |
| 1064 | + "%%bash\n", |
| 1065 | + "\n", |
| 1066 | + "pip install gradio" |
| 1067 | + ] |
| 1068 | + }, |
| 1069 | + { |
| 1070 | + "cell_type": "code", |
| 1071 | + "execution_count": null, |
1061 | 1072 | "metadata": {
|
1062 | 1073 | "colab": {
|
1063 | 1074 | "base_uri": "https://localhost:8080/",
|
|
1079 | 1090 | "id": "sK_JeKZLhXcy",
|
1080 | 1091 | "outputId": "b0c8c776-4151-44a0-9acd-b12f53119af8"
|
1081 | 1092 | },
|
1082 |
| - "outputs": [ |
1083 |
| - { |
1084 |
| - "name": "stdout", |
1085 |
| - "output_type": "stream", |
1086 |
| - "text": [ |
1087 |
| - "ENTER YOUR MESSAGE 👇 INFO: Type 'exit' or 'quit' to stop\n", |
1088 |
| - "Can you tell me where Giorgio lives?\n" |
1089 |
| - ] |
1090 |
| - }, |
1091 |
| - { |
1092 |
| - "data": { |
1093 |
| - "application/vnd.jupyter.widget-view+json": { |
1094 |
| - "model_id": "235721f8c1c14ce3885fba24638bc27f", |
1095 |
| - "version_major": 2, |
1096 |
| - "version_minor": 0 |
1097 |
| - }, |
1098 |
| - "text/plain": [ |
1099 |
| - "Batches: 0%| | 0/1 [00:00<?, ?it/s]" |
1100 |
| - ] |
1101 |
| - }, |
1102 |
| - "metadata": {}, |
1103 |
| - "output_type": "display_data" |
1104 |
| - }, |
1105 |
| - { |
1106 |
| - "name": "stdout", |
1107 |
| - "output_type": "stream", |
1108 |
| - "text": [ |
1109 |
| - "Giorgio lives in Rome.ENTER YOUR MESSAGE 👇 INFO: Type 'exit' or 'quit' to stop\n", |
1110 |
| - "What's the weather like there?\n", |
1111 |
| - "The weather in Rome is sunny with a temperature of 14°C.ENTER YOUR MESSAGE 👇 INFO: Type 'exit' or 'quit' to stop\n", |
1112 |
| - "exit\n" |
1113 |
| - ] |
1114 |
| - } |
1115 |
| - ], |
| 1093 | + "outputs": [], |
1116 | 1094 | "source": [
|
| 1095 | + "import gradio as gr\n", |
1117 | 1096 | "import json\n",
|
1118 | 1097 | "\n",
|
1119 |
| - "from haystack.dataclasses import ChatMessage, ChatRole\n", |
| 1098 | + "from haystack.dataclasses import ChatMessage\n", |
1120 | 1099 | "from haystack.components.generators.chat import OpenAIChatGenerator\n",
|
1121 |
| - "from haystack.components.generators.utils import print_streaming_chunk\n", |
1122 | 1100 | "\n",
|
1123 |
| - "chat_generator = OpenAIChatGenerator(model=\"gpt-3.5-turbo\", streaming_callback=print_streaming_chunk)\n", |
| 1101 | + "chat_generator = OpenAIChatGenerator(model=\"gpt-3.5-turbo\")\n", |
1124 | 1102 | "response = None\n",
|
1125 | 1103 | "messages = [\n",
|
1126 | 1104 | " ChatMessage.from_system(\n",
|
1127 | 1105 | " \"Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous.\"\n",
|
1128 | 1106 | " )\n",
|
1129 | 1107 | "]\n",
|
1130 | 1108 | "\n",
|
1131 |
| - "while True:\n", |
1132 |
| - " # if OpenAI response is a tool call\n", |
1133 |
| - " if response and response[\"replies\"][0].meta[\"finish_reason\"] == \"tool_calls\":\n", |
1134 |
| - " function_calls = json.loads(response[\"replies\"][0].content)\n", |
1135 | 1109 | "\n",
|
1136 |
| - " for function_call in function_calls:\n", |
1137 |
| - " ## Parse function calling information\n", |
1138 |
| - " function_name = function_call[\"function\"][\"name\"]\n", |
1139 |
| - " function_args = json.loads(function_call[\"function\"][\"arguments\"])\n", |
| 1110 | + "def chatbot_with_fc(message, history):\n", |
| 1111 | + " messages.append(ChatMessage.from_user(message))\n", |
| 1112 | + " response = chat_generator.run(messages=messages, generation_kwargs={\"tools\": tools})\n", |
1140 | 1113 | "\n",
|
1141 |
| - " ## Find the correspoding function and call it with the given arguments\n", |
1142 |
| - " function_to_call = available_functions[function_name]\n", |
1143 |
| - " function_response = function_to_call(**function_args)\n", |
| 1114 | + " while True:\n", |
| 1115 | + " # if OpenAI response is a tool call\n", |
| 1116 | + " if response and response[\"replies\"][0].meta[\"finish_reason\"] == \"tool_calls\":\n", |
| 1117 | + " function_calls = json.loads(response[\"replies\"][0].content)\n", |
| 1118 | + " print(response[\"replies\"][0])\n", |
| 1119 | + " for function_call in function_calls:\n", |
| 1120 | + " ## Parse function calling information\n", |
| 1121 | + " function_name = function_call[\"function\"][\"name\"]\n", |
| 1122 | + " function_args = json.loads(function_call[\"function\"][\"arguments\"])\n", |
1144 | 1123 | "\n",
|
1145 |
| - " ## Append function response to the messages list using `ChatMessage.from_function`\n", |
1146 |
| - " messages.append(ChatMessage.from_function(content=json.dumps(function_response), name=function_name))\n", |
| 1124 | + "        ## Find the corresponding function and call it with the given arguments\n", |
| 1125 | + " function_to_call = available_functions[function_name]\n", |
| 1126 | + " function_response = function_to_call(**function_args)\n", |
1147 | 1127 | "\n",
|
1148 |
| - " # Regular Conversation\n", |
1149 |
| - " else:\n", |
1150 |
| - " # Append assistant messages to the messages list\n", |
1151 |
| - " if not messages[-1].is_from(ChatRole.SYSTEM):\n", |
1152 |
| - " messages.append(response[\"replies\"][0])\n", |
| 1128 | + " ## Append function response to the messages list using `ChatMessage.from_function`\n", |
| 1129 | + " messages.append(ChatMessage.from_function(content=json.dumps(function_response), name=function_name))\n", |
| 1130 | + " response = chat_generator.run(messages=messages, generation_kwargs={\"tools\": tools})\n", |
1153 | 1131 | "\n",
|
1154 |
| - " user_input = input(\"ENTER YOUR MESSAGE 👇 INFO: Type 'exit' or 'quit' to stop\\n\")\n", |
1155 |
| - " if user_input.lower() == \"exit\" or user_input.lower() == \"quit\":\n", |
1156 |
| - " break\n", |
| 1132 | + " # Regular Conversation\n", |
1157 | 1133 | " else:\n",
|
1158 |
| - " messages.append(ChatMessage.from_user(user_input))\n", |
| 1134 | + " messages.append(response[\"replies\"][0])\n", |
| 1135 | + " break\n", |
| 1136 | + " return response[\"replies\"][0].content\n", |
| 1137 | + "\n", |
1159 | 1138 | "\n",
|
1160 |
| - " response = chat_generator.run(messages=messages, generation_kwargs={\"tools\": tools})" |
| 1139 | + "demo = gr.ChatInterface(\n", |
| 1140 | + " fn=chatbot_with_fc,\n", |
| 1141 | + " examples=[\n", |
| 1142 | + " \"Can you tell me where Giorgio lives?\",\n", |
| 1143 | + " \"What's the weather like in Madrid?\",\n", |
| 1144 | + " \"Who lives in London?\",\n", |
| 1145 | + " \"What's the weather like where Mark lives?\",\n", |
| 1146 | + " ],\n", |
| 1147 | + " title=\"Ask me about weather or where people live!\",\n", |
| 1148 | + ")\n", |
| 1149 | + "demo.launch()" |
1161 | 1150 | ]
|
1162 | 1151 | },
|
1163 | 1152 | {
|
|
0 commit comments