Commit 92de842
feat: cohere inference api endpoint (#15)

* feat: cohere inference api endpoint
* fix: fix executable config file

1 parent 7745059
File tree: 4 files changed (+93, -5 lines)
.gitignore

Lines changed: 1 addition & 2 deletions

```diff
@@ -2,8 +2,7 @@
 # burpference logs and local config
 logs/
 .burpference/
-configs/*
-!configs/*.example.json
+configs/*_local.json # Only ignore files with _local suffix
 prompt.txt

 ### Python ###
```

burpference/api_adapters.py

Lines changed: 60 additions & 1 deletion

```diff
@@ -275,6 +275,63 @@ def process_response(self, response_data):
         return str(response)


+# Cohere /v2/chat API adapter class
+class CohereAPIAdapter(BaseAPIAdapter):
+    def prepare_request(self, user_content, system_content=None):
+        messages = []
+        if system_content:
+            messages.append(
+                {
+                    "role": "SYSTEM",
+                    "content": system_content,
+                }
+            )
+        messages.append(
+            {
+                "role": "USER",
+                "content": user_content,
+            }
+        )
+
+        return {
+            "model": self.config.get("model", "command-r-plus-08-2024"),
+            "messages": messages,
+            "stream": self.config.get("stream", False),
+        }
+
+    def process_response(self, response_data):
+        response = json.loads(response_data)
+        if "text" in response:
+            return response["text"]
+        elif "response" in response and "text" in response["response"]:
+            return response["response"]["text"]
+        else:
+            raise ValueError("Unexpected response format: %s" % str(response))
+
+    def send_request(self, request_payload):
+        headers = self.config.get("headers", {})
+        if not headers:
+            headers = {
+                "accept": "application/json",
+                "content-type": "application/json",
+                "Authorization": "Bearer %s" % self.config.get("api_key", ""),
+            }
+
+        encoded_data = json.dumps(request_payload).encode("utf-8")
+        req = urllib2.Request(
+            self.config.get("host"), data=encoded_data, headers=headers
+        )
+
+        try:
+            response = urllib2.urlopen(req)
+            return response.read()
+        except urllib2.HTTPError as e:
+            error_message = e.read().decode("utf-8")
+            raise ValueError("HTTP Error %d: %s" % (e.code, error_message))
+        except Exception as e:
+            raise ValueError("Error sending request: %s" % str(e))
+
+
 # Generic other API base adapter


@@ -295,7 +352,9 @@ def get_api_adapter(config):
     api_type = config.get("api_type", "").lower()
     endpoint = config.get("host", "").lower()

-    if api_type == "ollama":
+    if api_type == "cohere":
+        return CohereAPIAdapter(config)
+    elif api_type == "ollama":
        if "/generate" in endpoint:
            return OllamaGenerateAPIAdapter(config)
        elif "/chat" in endpoint:
```
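For orientation, a minimal sketch of driving the new adapter end to end. Standalone use like this is hypothetical (burpference invokes these methods from its proxy machinery, which is outside this diff), and the message strings are illustrative:

```python
# Config keys mirror the example in configs/README.md; with no "headers"
# entry, send_request falls back to building them from "api_key".
config = {
    "api_type": "cohere",
    "host": "https://api.cohere.com/v2/chat",
    "model": "command-r-plus-08-2024",
    "api_key": "CO_API_KEY",  # placeholder, substitute a real key
    "stream": False,
}

adapter = get_api_adapter(config)  # dispatches to CohereAPIAdapter
payload = adapter.prepare_request(
    "Review this HTTP exchange for vulnerabilities.",  # user content
    "You are a security analyst.",  # optional system prompt
)
raw = adapter.send_request(payload)   # POSTs JSON to the /v2/chat host
print(adapter.process_response(raw))  # extracts the model's "text" reply
```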

configs/README.md

Lines changed: 21 additions & 2 deletions

````diff
@@ -17,6 +17,8 @@ If you intend to fork or contribute to burpference, ensure that you have exclude
 - [Example OpenAI `/completions` inference with `gpt-4o-mini`:](#example-openai-completions-inference-with-gpt-4o-mini)
 - [HuggingFace Serveless Inference](#huggingface-serveless-inference)
 - [Example HuggingFace `/text-generation` inference](#example-huggingface-text-generation-inference)
+- [Cohere `/v2/chat` Inference](#cohere-v2chat-inference)
+- [Example Cohere `/v2/chat` inference](#example-cohere-v2chat-inference)
 - [Model System Prompts](#model-system-prompts)

 ---
@@ -122,9 +124,26 @@ In order to serve inference as part of burpference, the model must be running on
 }
 ```

-## Model System Prompts
+### Cohere `/v2/chat` Inference
+
+#### Example Cohere `/v2/chat` inference
+
+```json
+{
+  "api_type": "cohere",
+  "headers": {
+    "Authorization": "bearer CO_API_KEY",
+    "accept": "application/json",
+    "content-type": "application/json"
+  },
+  "host": "https://api.cohere.com/v2/chat",
+  "model": "command-r-plus-08-2024",
+  "stream": false
+}
+```

-By default, the system prompt sent as pretext to the model is defined [here](../prompts/proxy_prompt.txt), feel free to edit, tune and tweak as you see fit.
+## Model System Prompts

+By default, the system prompt sent as pretext to the model is defined [here](../prompts/proxy_prompt.txt), feel free to edit, tune and tweak as you see fit. This is also true for the scanner extension tab.

 ---
````
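For reference, a sketch of the `/v2/chat` request body that `prepare_request` assembles from this config; the message contents below are illustrative placeholders, not part of the commit:

```json
{
  "model": "command-r-plus-08-2024",
  "messages": [
    { "role": "SYSTEM", "content": "<system prompt from prompts/proxy_prompt.txt>" },
    { "role": "USER", "content": "<intercepted request/response to analyze>" }
  ],
  "stream": false
}
```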
Lines changed: 11 additions & 0 deletions

```diff
@@ -0,0 +1,11 @@
+{
+  "api_type": "cohere",
+  "headers": {
+    "Authorization": "bearer CO_API_KEY",
+    "accept": "application/json",
+    "content-type": "application/json"
+  },
+  "host": "https://api.cohere.com/v2/chat",
+  "model": "command-r-plus-08-2024",
+  "stream": false
+}
```
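Note that the updated `.gitignore` only excludes `configs/*_local.json`, so a copy of this file holding a real key should carry the `_local` suffix (the filename below is hypothetical). A minimal sketch, assuming a `load_config` helper that is not part of this commit, of reading such a config and substituting the `CO_API_KEY` placeholder from the environment:

```python
import json
import os

def load_config(path):
    # Hypothetical helper (not part of this commit): read a burpference
    # config and swap the CO_API_KEY placeholder for a real key taken
    # from the environment.
    with open(path) as f:
        config = json.load(f)
    api_key = os.environ.get("CO_API_KEY", "")
    config["headers"]["Authorization"] = "Bearer %s" % api_key
    return config

# The "_local" suffix keeps the file with the real key out of version control.
config = load_config("configs/cohere_local.json")
```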
