# Copyright © 2024 IOTIC LABS LTD. [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/nyx-sdk/blob/main/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from nyx_client.configuration import BaseNyxConfig, ConfigType, NyxConfigExtended
from nyx_extras.langchain import NyxLangChain

def main():
    """
    Demonstrates the simplest usage of the LangChain module of the Nyx Client. By default,
    a query runs against all the data, which is downloaded and processed when the query
    method is called.

    A config object must be provided to the NyxLangChain class; internally, an LLM-specific
    class is created based on the configuration type.

    When instantiating a language-model-specific config, the relevant API key must be
    available as an environment variable, or it must be passed in explicitly.
    """
    # Supply ConfigType.COHERE as the provider to use the Cohere LLM instead
    base_config = BaseNyxConfig.from_env()
    config = NyxConfigExtended(base_config=base_config, provider=ConfigType.OPENAI, api_key="your_api_key_here")
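    # Alternatively, a sketch of the environment-variable route mentioned in the docstring,
    # mirroring the from_env usage shown in custom_openai_llm below:
    # config = NyxConfigExtended.from_env(provider=ConfigType.OPENAI)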
    client = NyxLangChain(config=config, log_level=logging.DEBUG)

    while True:
        prompt = input("What is your question? ")
        if prompt == "":
            continue
        # When query is called, all subscribed data is pulled down from Nyx and supplied to the LLM.
        print(client.query(prompt))

def custom_data():
    """
    Demonstrates how to limit the data you want to query. This can be useful if you want to
    speed up the prompt by reducing the amount of data, and it also prevents the data from
    being downloaded and processed automatically, giving you more control.
    """
    client = NyxLangChain()

    # Get only the data matching this genre and these categories
    data = client.get_data(genre="sdktest1", categories=["ai"])
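    # Multiple categories can be combined to widen the match; the extra category name
    # below is hypothetical:
    # data = client.get_data(genre="sdktest1", categories=["ai", "climate"])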
    while True:
        prompt = input("What is your question? ")
        if prompt == "":
            continue
        # Pass these into the query; now the LLM will only be supplied matching data
        print(client.query(prompt, data=data))

def include_own_data():
    """
    Demonstrates how to include your own data, created in Nyx, in the query.
    """
    client = NyxLangChain()

    while True:
        prompt = input("What is your question? ")
        if prompt == "":
            continue
        print(client.query(prompt, include_own=True))

def custom_openai_llm():
    """
    Demonstrates how to pass an OpenAI model class into the client, which will then be used
    to execute queries. In the LangChain module of Nyx, this will always be an instance of
    langchain_core.language_models.chat_models.BaseChatModel. The LLM provided must support
    tool calling, or a NotImplementedError will be raised.
    """
    from langchain_openai import ChatOpenAI

    config = NyxConfigExtended.from_env(provider=ConfigType.OPENAI)
    llm = ChatOpenAI(model="gpt-4o-mini", api_key=config.api_key)
    client = NyxLangChain(config=config, llm=llm)

    while True:
        prompt = input("What is your question? ")
        if prompt == "":
            continue
        print(client.query(prompt, include_own=True))

def custom_llama_llm():
    """
    You're not restricted to any one LLM provider; you're free to use any supported
    BaseChatModel (https://python.langchain.com/docs/integrations/chat/).
    In this example we're using Groq to run a Llama 3 70B tool-use model, but this can
    also be run on your own hardware for a totally private RAG!
    """
    # pip install langchain-groq
    from langchain_groq import ChatGroq

    llm = ChatGroq(model="llama3-groq-70b-8192-tool-use-preview")
    config = NyxConfigExtended.from_env(provider=ConfigType.BASE)
    client = NyxLangChain(config=config, llm=llm)

    while True:
        prompt = input("What is your question? ")
        if prompt == "":
            continue
        print(client.query(prompt, include_own=True))
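
def custom_local_llm():
    """
    A minimal sketch of the "own hardware" route mentioned in custom_llama_llm, assuming a
    local Ollama server and the langchain-ollama package (pip install langchain-ollama).
    The model name below is an example; any locally pulled model that supports tool calling
    should work.
    """
    from langchain_ollama import ChatOllama

    # Assumes `ollama pull llama3.1` has been run and the Ollama server is listening locally.
    llm = ChatOllama(model="llama3.1")
    config = NyxConfigExtended.from_env(provider=ConfigType.BASE)
    client = NyxLangChain(config=config, llm=llm)

    while True:
        prompt = input("What is your question? ")
        if prompt == "":
            continue
        print(client.query(prompt, include_own=True))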

if __name__ == "__main__":
    custom_openai_llm()