|
138 | 138 | {
|
139 | 139 | "data": {
|
140 | 140 | "text/plain": [
|
141 |     | - "{'replies': [ChatMessage(content='Natürliche Sprachverarbeitung (NLP) ist ein Bereich der künstlichen Intelligenz, der sich mit der Wechselwirkung zwischen Menschensprache und Maschinen befasst. Es zielt darauf ab, Computern das Verstehen, Interpretieren und Generieren menschlicher Sprache zu ermöglichen.', role=<ChatRole.ASSISTANT: 'assistant'>, name=None, meta={'model': 'gpt-3.5-turbo-0125', 'index': 0, 'finish_reason': 'stop', 'usage': {'completion_tokens': 74, 'prompt_tokens': 34, 'total_tokens': 108}})]}"
    | 141 | + "{'replies': [ChatMessage(content='Natürliche Sprachverarbeitung (NLP) ist ein Bereich der künstlichen Intelligenz, der sich mit der Wechselwirkung zwischen Menschensprache und Maschinen befasst. Es zielt darauf ab, Computern das Verstehen, Interpretieren und Generieren menschlicher Sprache zu ermöglichen.', role=<ChatRole.ASSISTANT: 'assistant'>, name=None, meta={'model': 'gpt-4o-mini-2024-07-18', 'index': 0, 'finish_reason': 'stop', 'usage': {'completion_tokens': 74, 'prompt_tokens': 34, 'total_tokens': 108}})]}"
142 | 142 | ]
|
143 | 143 | },
|
144 | 144 | "execution_count": 4,
|
|
155 | 155 | " ChatMessage.from_user(\"What's Natural Language Processing? Be brief.\"),\n",
|
156 | 156 | "]\n",
|
157 | 157 | "\n",
|
158 |     | - "chat_generator = OpenAIChatGenerator(model=\"gpt-3.5-turbo\")\n",
    | 158 | + "chat_generator = OpenAIChatGenerator(model=\"gpt-4o-mini\")\n",
159 | 159 | "chat_generator.run(messages=messages)"
|
160 | 160 | ]
|
161 | 161 | },
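
For reference, the cell touched by this hunk reduces to the standalone sketch below (assuming haystack-ai 2.x and an OPENAI_API_KEY in the environment; the system-message text is a placeholder, since only the user message and the generator line appear in the hunk):

    # Standalone sketch of the updated cell; the system prompt is an assumption.
    from haystack.dataclasses import ChatMessage
    from haystack.components.generators.chat import OpenAIChatGenerator

    messages = [
        ChatMessage.from_system("Reply in German."),  # placeholder; real prompt sits above the hunk
        ChatMessage.from_user("What's Natural Language Processing? Be brief."),
    ]

    chat_generator = OpenAIChatGenerator(model="gpt-4o-mini")
    print(chat_generator.run(messages=messages))  # -> {'replies': [ChatMessage(...)]}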
|
|
194 | 194 | "from haystack.components.generators.chat import OpenAIChatGenerator\n",
|
195 | 195 | "from haystack.components.generators.utils import print_streaming_chunk\n",
|
196 | 196 | "\n",
|
197 |     | - "chat_generator = OpenAIChatGenerator(model=\"gpt-3.5-turbo\", streaming_callback=print_streaming_chunk)\n",
    | 197 | + "chat_generator = OpenAIChatGenerator(model=\"gpt-4o-mini\", streaming_callback=print_streaming_chunk)\n",
198 | 198 | "response = chat_generator.run(messages=messages)"
|
199 | 199 | ]
|
200 | 200 | },
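
The streaming cell differs only in the callback; a minimal sketch of the updated call, reusing the messages list from the previous sketch:

    # print_streaming_chunk prints tokens to stdout as they arrive;
    # the complete ChatMessage is still returned under 'replies'.
    from haystack.components.generators.chat import OpenAIChatGenerator
    from haystack.components.generators.utils import print_streaming_chunk

    chat_generator = OpenAIChatGenerator(model="gpt-4o-mini", streaming_callback=print_streaming_chunk)
    response = chat_generator.run(messages=messages)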
|
|
662 | 662 | "rag_pipe.add_component(\"embedder\", SentenceTransformersTextEmbedder(model=\"sentence-transformers/all-MiniLM-L6-v2\"))\n",
|
663 | 663 | "rag_pipe.add_component(\"retriever\", InMemoryEmbeddingRetriever(document_store=document_store))\n",
|
664 | 664 | "rag_pipe.add_component(\"prompt_builder\", PromptBuilder(template=template))\n",
|
665 |     | - "rag_pipe.add_component(\"llm\", OpenAIGenerator(model=\"gpt-3.5-turbo\"))\n",
    | 665 | + "rag_pipe.add_component(\"llm\", OpenAIGenerator(model=\"gpt-4o-mini\"))\n",
666 | 666 | "\n",
|
667 | 667 | "rag_pipe.connect(\"embedder.embedding\", \"retriever.query_embedding\")\n",
|
668 | 668 | "rag_pipe.connect(\"retriever\", \"prompt_builder.documents\")\n",
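
Assembled outside the notebook JSON, the updated RAG pipeline looks roughly like this; the document store contents, the prompt template, and the final prompt_builder-to-llm connection are not part of the hunk, so they are assumptions here:

    # Rough sketch of the updated pipeline (haystack-ai 2.x). The template text,
    # the documents, and the last connect() call are assumptions -- they live
    # outside this hunk.
    from haystack import Pipeline
    from haystack.document_stores.in_memory import InMemoryDocumentStore
    from haystack.components.embedders import SentenceTransformersTextEmbedder
    from haystack.components.retrievers.in_memory import InMemoryEmbeddingRetriever
    from haystack.components.builders import PromptBuilder
    from haystack.components.generators import OpenAIGenerator

    document_store = InMemoryDocumentStore()  # populated earlier in the notebook

    # Placeholder Jinja template, not the one from the notebook.
    template = "Answer the question given the documents.\n{% for document in documents %}{{ document.content }}\n{% endfor %}\nQuestion: {{ question }}\nAnswer:"

    rag_pipe = Pipeline()
    rag_pipe.add_component("embedder", SentenceTransformersTextEmbedder(model="sentence-transformers/all-MiniLM-L6-v2"))
    rag_pipe.add_component("retriever", InMemoryEmbeddingRetriever(document_store=document_store))
    rag_pipe.add_component("prompt_builder", PromptBuilder(template=template))
    rag_pipe.add_component("llm", OpenAIGenerator(model="gpt-4o-mini"))

    rag_pipe.connect("embedder.embedding", "retriever.query_embedding")
    rag_pipe.connect("retriever", "prompt_builder.documents")
    rag_pipe.connect("prompt_builder", "llm")  # assumed; this connection sits below the hunk

A query then flows through all four components in one call, which is what produces the {'llm': {'replies': ['Berlin'], ...}} output shown in the hunk just below:

    query = "Where does Mark live?"  # assumed example query
    result = rag_pipe.run({"embedder": {"text": query}, "prompt_builder": {"question": query}})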
|
|
722 | 722 | "data": {
|
723 | 723 | "text/plain": [
|
724 | 724 | "{'llm': {'replies': ['Berlin'],\n",
|
725 |     | - " 'meta': [{'model': 'gpt-3.5-turbo-0125',\n",
    | 725 | + " 'meta': [{'model': 'gpt-4o-mini-2024-07-18',\n",
726 | 726 | " 'index': 0,\n",
|
727 | 727 | " 'finish_reason': 'stop',\n",
|
728 | 728 | " 'usage': {'completion_tokens': 1,\n",
|
|
886 | 886 | " ChatMessage.from_user(\"Can you tell me where Mark lives?\"),\n",
|
887 | 887 | "]\n",
|
888 | 888 | "\n",
|
889 |     | - "chat_generator = OpenAIChatGenerator(model=\"gpt-3.5-turbo\", streaming_callback=print_streaming_chunk)\n",
    | 889 | + "chat_generator = OpenAIChatGenerator(model=\"gpt-4o-mini\", streaming_callback=print_streaming_chunk)\n",
890 | 890 | "response = chat_generator.run(messages=messages, generation_kwargs={\"tools\": tools})"
|
891 | 891 | ]
|
892 | 892 | },
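
The tool-calling cell hands the OpenAI tool schema to the model through generation_kwargs; a minimal sketch of the updated call, where tools is the function-schema list and messages the chat history defined earlier in the notebook (both assumed here):

    # 'tools' (OpenAI function/tool schema) and 'messages' come from earlier
    # notebook cells and are assumed to exist here.
    from haystack.components.generators.chat import OpenAIChatGenerator
    from haystack.components.generators.utils import print_streaming_chunk

    chat_generator = OpenAIChatGenerator(model="gpt-4o-mini", streaming_callback=print_streaming_chunk)
    response = chat_generator.run(messages=messages, generation_kwargs={"tools": tools})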
|
|
908 | 908 | " ChatMessage(\n",
|
909 | 909 | " content='[{\"index\": 0, \"id\": \"call_3VnT0XQH0ye41g3Ip5CRz4ri\", \"function\": {\"arguments\": \"{\\\\\"query\\\\\":\\\\\"Where does Mark live?\\\\\"}\", \"name\": \"rag_pipeline_func\"}, \"type\": \"function\"}]', role=<ChatRole.ASSISTANT: 'assistant'>, \n",
|
910 | 910 | " name=None, \n",
|
911 |     | - " meta={'model': 'gpt-3.5-turbo-0125', 'index': 0, 'finish_reason': 'tool_calls', 'usage': {}}\n",
    | 911 | + " meta={'model': 'gpt-4o-mini-2024-07-18', 'index': 0, 'finish_reason': 'tool_calls', 'usage': {}}\n",
912 | 912 | " )\n",
|
913 | 913 | " ]\n",
|
914 | 914 | "}\n",
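
Since finish_reason is 'tool_calls', the reply's content is a JSON-encoded list of tool calls rather than prose. A hedged sketch of the decoding step that follows in the notebook (the available_functions mapping is an assumption):

    import json

    # Decode the tool call from the reply above and dispatch it;
    # 'available_functions' maps tool names such as "rag_pipeline_func"
    # to Python callables (assumed to be defined earlier in the notebook).
    function_call = json.loads(response["replies"][0].content)[0]
    function_name = function_call["function"]["name"]                   # "rag_pipeline_func"
    function_args = json.loads(function_call["function"]["arguments"])  # {"query": "Where does Mark live?"}
    function_response = available_functions[function_name](**function_args)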
|
|
1098 | 1098 | "from haystack.dataclasses import ChatMessage\n",
|
1099 | 1099 | "from haystack.components.generators.chat import OpenAIChatGenerator\n",
|
1100 | 1100 | "\n",
|
1101 |      | - "chat_generator = OpenAIChatGenerator(model=\"gpt-3.5-turbo\")\n",
     | 1101 | + "chat_generator = OpenAIChatGenerator(model=\"gpt-4o-mini\")\n",
1102 | 1102 | "response = None\n",
|
1103 | 1103 | "messages = [\n",
|
1104 | 1104 | " ChatMessage.from_system(\n",
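
This last hunk only swaps the model in the conversational-agent cell; the system prompt and the chat loop around it lie outside the diff. A heavily hedged sketch of that cell's overall shape, with every specific below assumed:

    # Shape of the interactive chat cell; the system prompt text, exit handling,
    # and tool handling are assumptions -- only the generator line is in the hunk.
    chat_generator = OpenAIChatGenerator(model="gpt-4o-mini")
    response = None
    messages = [
        ChatMessage.from_system("You are a helpful assistant. Use the provided tools when needed."),
    ]

    while True:
        user_input = input("> ")
        if user_input.lower() in {"exit", "quit"}:
            break
        messages.append(ChatMessage.from_user(user_input))
        response = chat_generator.run(messages=messages, generation_kwargs={"tools": tools})
        messages.append(response["replies"][0])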
|
|