Skip to content

Commit e267faf

Browse files
committed
added _experiment of transforming a ChatGPT thread into a CBR thread
1 parent 81b1521 commit e267faf

File tree

1 file changed

+79
-0
lines changed

1 file changed

+79
-0
lines changed
Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
2+
from unittest import TestCase
3+
import pytest
4+
import requests
5+
#from bs4 import BeautifulSoup
6+
from osbot_utils.helpers.Random_Guid import Random_Guid
7+
from osbot_utils.utils.Dev import pprint
8+
from osbot_utils.utils.Files import file_create, file_contains, file_contents, temp_file
9+
from osbot_utils.utils.Json import json_load, json_parse, json_file_create, json_file_load
10+
from osbot_utils.utils.Objects import dict_to_obj
11+
from osbot_llms.models.GPT_History import GPT_History
12+
from osbot_llms.models.LLMs__Chat_Completion import LLMs__Chat_Completion
13+
14+
15+
# notes:
16+
# - this worked really well :)
17+
# - the use of bs4 might be a bit over the top since all we need is the contents of the last script tag
18+
# - there was a bug with Html_To_Tag that prevented its use ( todo: see why and fix it)
19+
# @pytest.mark.skip("Needs to be integrated into a service")
20+
# class test_chat_gpt_threads(TestCase):
21+
# def test_load_chat_gpt_saved_chat(self):
22+
#
23+
# # url = "https://chatgpt.com/share/6713bfef-3778-8006-9bbd-01f842395d6c"
24+
# # response = requests.get(url)
25+
# # assert response.status_code
26+
# # html_code = response.text
27+
# #html_file = file_create(contents=html_code, extension=".html")
28+
#
29+
# # html_file = '/var/folders/z4/3k99j_cn39g_0jnqt6w57f6m0000gn/T/tmpqnpeu3bj.html'
30+
# # html_code = file_contents(path=html_file)
31+
# # html_soup = BeautifulSoup(html_code, 'html.parser')
32+
# # js_code = html_soup.find_all('script')[1].text
33+
# # json_code = js_code[24:-1]
34+
# # json_data = json.loads(json_code)
35+
#
36+
# #json_file = json_file_create(json_data, path=temp_file(extension=".json"))
37+
# json_file = '/var/folders/z4/3k99j_cn39g_0jnqt6w57f6m0000gn/T/tmpw28a0cpx.json'
38+
# json_data = json_file_load(json_file)
39+
# json_obj = dict_to_obj(json_data)
40+
#
41+
# action_json = json_data.get('state').get('loaderData').get('routes/share.$shareId.($action)')
42+
# action_obj = dict_to_obj(action_json)
43+
#
44+
# data_obj = action_obj.serverResponse.data
45+
#
46+
# title = data_obj.title
47+
# print()
48+
#
49+
# question = None
50+
# answer = None
51+
# for item in data_obj.linear_conversation:
52+
# if hasattr(item, 'message'):
53+
# if hasattr(item.message.metadata, 'is_visually_hidden_from_conversation'):
54+
# continue
55+
# role = item.message.author.role
56+
# if item.message.content.content_type == 'text':
57+
# parts = item.message.content.parts
58+
# text = ''.join(parts)
59+
# if role == 'user':
60+
# question = text
61+
# if role == 'assistant':
62+
# answer = text
63+
# gpt_history = GPT_History(question=question, answer=answer)
64+
# llms_chat_completion = LLMs__Chat_Completion(histories=[gpt_history])
65+
# llms_chat_completion.chat_thread_id = Random_Guid()
66+
# llms_chat_completion.user_prompt = "can you summarize this conversation?"
67+
# llms_chat_completion.stream = True
68+
# llms_chat_completion.llm_platform = "Groq (Free)"
69+
# llms_chat_completion.llm_provider = "1. Meta"
70+
# llms_chat_completion.llm_model = "llama-3.1-70b-versatile"
71+
#
72+
# request_json = llms_chat_completion.json()
73+
# #request_url = "https://community.prod.aws.cyber-boardroom.com/api/llms/chat/completion_proxy"
74+
# request_url = "https://community.dev.aws.cyber-boardroom.com/api/open_ai/prompt_with_system__stream"
75+
# response = requests.post(request_url, json=request_json)
76+
#
77+
# pprint(llms_chat_completion.chat_thread_id)
78+
#
79+
# pprint(dict(response.headers))

0 commit comments

Comments
 (0)