 from django.http import StreamingHttpResponse
 from django_prometheus.conf import NAMESPACE
 from drf_spectacular.utils import OpenApiResponse, extend_schema
+from llama_stack_client import AsyncLlamaStackClient
+from llama_stack_client.lib.agents.agent import AsyncAgent
+from llama_stack_client.types.agent_create_params import AgentConfig
 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
 from prometheus_client import Histogram
 from rest_framework import permissions, serializers
@@ -103,7 +106,7 @@
 )
 from ansible_ai_connect.users.models import User

-from ...main.permissions import IsAAPUser, IsRHInternalUser, IsTestUser
+# from ...main.permissions import IsAAPUser, IsRHInternalUser, IsTestUser
 from ...users.throttling import EndpointRateThrottle
 from ..feature_flags import FeatureFlags
 from .data.data_model import ContentMatchPayloadData, ContentMatchResponseDto
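The three new llama_stack_client imports are the whole integration surface: an async HTTP client, an agent wrapper around it, and the typed config the agent consumes. A minimal standalone sketch for sanity-checking that the client can reach a llama-stack server before wiring it into the view; the base_url matches the one used later in this diff, and models.list() is a standard llama-stack-client call, though the exact response shape can vary by library version:

    import asyncio

    from llama_stack_client import AsyncLlamaStackClient


    async def main() -> None:
        # Same address the view constructor uses further down in this diff.
        client = AsyncLlamaStackClient(base_url="http://localhost:8321")
        models = await client.models.list()
        # Each entry carries the model identifier, e.g. "llama3.2:3b-instruct-fp16".
        print([m.identifier for m in models])


    asyncio.run(main())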
@@ -246,12 +249,13 @@ def finalize_response(self, request, response, *args, **kwargs):
         response = super().finalize_response(request, response, *args, **kwargs)

         try:
-            model_meta_data: MetaData = apps.get_app_config("ai").get_model_pipeline(MetaData)
-            user = request.user
-            org_id = hasattr(user, "organization") and user.organization and user.organization.id
-            self.event.modelName = self.event.modelName or model_meta_data.get_model_id(
-                request.user, org_id, self.req_model_id
-            )
+            pass
+            # model_meta_data: MetaData = apps.get_app_config("ai").get_model_pipeline(MetaData)
+            # user = request.user
+            # org_id = hasattr(user, "organization") and user.organization and user.organization.id
+            # self.event.modelName = self.event.modelName or model_meta_data.get_model_id(
+            #     request.user, org_id, self.req_model_id
+            # )
         except (WcaNoDefaultModelId, WcaModelIdNotFound, WcaSecretManagerError):
             pass
         self.event.set_response(response)
@@ -1044,9 +1048,9 @@ class ChatEndpointThrottle(EndpointRateThrottle):
         scope = "chat"

     permission_classes = [
-        permissions.IsAuthenticated,
-        IsAuthenticatedOrTokenHasScope,
-        IsRHInternalUser | IsTestUser | IsAAPUser,
+        # permissions.IsAuthenticated,
+        # IsAuthenticatedOrTokenHasScope,
+        # IsRHInternalUser | IsTestUser | IsAAPUser,
     ]
     required_scopes = ["read", "write"]
     schema1_event = schema1.ChatBotOperationalEvent
@@ -1142,9 +1146,9 @@ class StreamingChatEndpointThrottle(EndpointRateThrottle):
         scope = "chat"

     permission_classes = [
-        permissions.IsAuthenticated,
-        IsAuthenticatedOrTokenHasScope,
-        IsRHInternalUser | IsTestUser | IsAAPUser,
+        # permissions.IsAuthenticated,
+        # IsAuthenticatedOrTokenHasScope,
+        # IsRHInternalUser | IsTestUser | IsAAPUser,
     ]
     required_scopes = ["read", "write"]
     schema1_event = schema1.StreamingChatBotOperationalEvent
@@ -1167,6 +1171,33 @@ def __init__(self):
         else:
             logger.debug("Chatbot is not enabled.")

+        agent_config = AgentConfig(
+            # model="anthropic/claude-3-5-haiku-latest",
+            model="llama3.2:3b-instruct-fp16",
+            instructions="You are a helpful Ansible Automation Platform assistant.",
+            sampling_params={
+                "strategy": {"type": "top_p", "temperature": 1.0, "top_p": 0.9},
+            },
+            toolgroups=(
+                [
+                    # "mcp::weather",
+                    # "mcp::github",
+                    # "mcp::fs",
+                    # "mcp::aap_api",
+                    # "mcp::gateway_api",
+                    # "builtin::websearch",
+                ]
+            ),
+            tool_choice="auto",
+            input_shields=[],  # available_shields if available_shields else [],
+            output_shields=[],  # available_shields if available_shields else [],
+            enable_session_persistence=False,
+        )
+        self.client = AsyncLlamaStackClient(
+            base_url="http://localhost:8321",
+        )
+        self.agent = AsyncAgent(self.client, agent_config)
+
     @extend_schema(
         request=StreamingChatRequestSerializer,
         responses={
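The constructor builds a single AgentConfig (local llama3.2 model, top-p sampling, all MCP toolgroups and shields disabled for now) and shares one AsyncAgent across requests. A sketch of how such an agent is typically driven, based on the create_session/create_turn surface from llama-stack-client's agent examples; the session name and the stream keyword are assumptions and may differ between library versions:

    from llama_stack_client.lib.agents.agent import AsyncAgent


    async def chat_once(agent: AsyncAgent, prompt: str) -> None:
        # Turns are grouped into sessions; persistence is disabled in this diff,
        # so sessions only live for the lifetime of the server process.
        session_id = await agent.create_session("aap-chat-demo")  # name is illustrative
        turn = await agent.create_turn(
            messages=[{"role": "user", "content": prompt}],
            session_id=session_id,
            stream=True,  # assumed keyword; streaming is the default in some versions
        )
        async for event in turn:
            print(event)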
@@ -1205,5 +1236,12 @@ def post(self, request) -> StreamingHttpResponse:
                     provider=req_provider,
                     conversation_id=conversation_id,
                     media_type=media_type,
+                    chatbackend_config={
+                        # "agent_config": self.agent_config,
+                        # "agent_id": self.agent.agent_id,
+                        # "session_id": self.session_id,
+                        "client": self.client,
+                        "agent": self.agent,
+                    },
                 )
             )
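chatbackend_config hands the shared client and agent to the downstream streaming pipeline, which is not shown in this diff. One plausible shape for that consumer, adapting agent events into the StreamingHttpResponse the view returns; apart from the "agent" key and StreamingHttpResponse itself, every name here is hypothetical:

    from django.http import StreamingHttpResponse


    async def sse_events(chatbackend_config: dict, prompt: str):
        # Pull the shared agent injected by the view above.
        agent = chatbackend_config["agent"]
        session_id = await agent.create_session("streaming-chat")  # illustrative name
        turn = await agent.create_turn(
            messages=[{"role": "user", "content": prompt}],
            session_id=session_id,
            stream=True,
        )
        async for event in turn:
            # Re-serialize each agent event as a server-sent-event frame.
            yield f"data: {event}\n\n".encode()


    # response = StreamingHttpResponse(
    #     sse_events(config, prompt), content_type="text/event-stream"
    # )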