Commit 31bb7c3

Merge pull request #87 from neph1/update-v0.31.1
llama3 "template"
2 parents 455a30c + c9e164f

File tree: 6 files changed, +22 −14 lines

llm_config.yaml
tale/llm/LivingNpc.py
tale/llm/io_adapters.py
tale/llm/llm_io.py
tale/player.py
tests/test_llm_ext.py

llm_config.yaml
Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@ DUNGEON_LOCATION_TEMPLATE: '{"index": (int), "name": "", "description": 25 words
 CHARACTER_TEMPLATE: '{"name":"", "description": "50 words", "appearance": "25 words", "personality": "50 words", "money":(int), "level":"", "gender":"m/f/n", "age":(int), "race":""}'
 FOLLOW_TEMPLATE: '{{"response":"yes or no", "reason":"50 words"}}'
 ITEM_TYPES: ["Weapon", "Wearable", "Health", "Money", "Trash", "Food", "Drink", "Key"]
-PRE_PROMPT: 'You are a creative game keeper for a role playing game (RPG). You craft detailed worlds and interesting characters with unique and deep personalities for the player to interact with. Do not acknowledge the task, just perform it.'
+PRE_PROMPT: 'You are a creative game keeper for a role playing game (RPG). You craft detailed worlds and interesting characters with unique and deep personalities for the player to interact with. Do not acknowledge the task or speak directly to the user, just perform it.'
 BASE_PROMPT: '<context>{context}</context>\n[USER_START]Rewrite [{input_text}] in your own words using the information found inside the <context> tags to create a background for your text. Use about {max_words} words.'
 DIALOGUE_PROMPT: '<context>{context}</context>\nThe following is a conversation between {character1} and {character2}; {character2}s sentiment towards {character1}: {sentiment}. Write a single response as {character2} in third person pov, using {character2} description and other information found inside the <context> tags. If {character2} has a quest active, they will discuss it based on its status. Respond in JSON using this template: """{dialogue_template}""". [USER_START]Continue the following conversation as {character2}: {previous_conversation}'
 COMBAT_PROMPT: '<context>{context}</context>\nThe following is a combat scene between {attackers} and {defenders} in {location}. [USER_START] Describe the following combat result in about 150 words in vivid language, using the characters weapons and their health status: 1.0 is highest, 0.0 is lowest. Combat Result: {input_text}'
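
The llm_io.py change further down reads two new optional keys, SYSTEM_START and PROMPT_END, from this same config. They are not added in this diff, so existing configs keep working; a hypothetical llama3-style pair of entries could look like this (the token strings are assumptions, not values from this commit):

# Hypothetical entries, not part of this commit: llama3-style template markers
# that llm_io.py below would pick up via config.get('SYSTEM_START', '') and
# config.get('PROMPT_END', '').
SYSTEM_START: '<|start_header_id|>system<|end_header_id|>\n\n'
PROMPT_END: '<|start_header_id|>assistant<|end_header_id|>\n\n'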

tale/llm/LivingNpc.py
Lines changed: 2 additions & 1 deletion

@@ -303,7 +303,8 @@ def _defer_result(self, action: str, verb: str="idle-action"):
         self.tell_action_deferred(verb)

     def tell_action_deferred(self, verb: str):
-        actions = '\n'.join(self.deferred_actions) + '\n'
+        actions = '\n'.join(self.deferred_actions) + '\n\n'
+        actions = actions.replace('\n\n\n', '\n\n')
         deferred_action = ParseResult(verb=verb, unparsed=actions, who_info=None)
         self.tell_others(actions)
         self.location._notify_action_all(deferred_action, actor=self)
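
The extra trailing newline plus the replace keeps every deferred-action block terminated by exactly one blank line, even when an individual action already ends in '\n'. A minimal sketch of the effect (the action strings are made up):

# Minimal sketch of the normalization above; the action strings are made up.
deferred_actions = ['The guard yawns.', 'A rat darts by.\n']
actions = '\n'.join(deferred_actions) + '\n\n'
# -> 'The guard yawns.\nA rat darts by.\n\n\n'
actions = actions.replace('\n\n\n', '\n\n')
# -> 'The guard yawns.\nA rat darts by.\n\n'  (exactly one blank line)

That trailing blank line is what player.tell() below splits on, and what the updated tests at the bottom now expect.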

tale/llm/io_adapters.py
Lines changed: 12 additions & 4 deletions

@@ -13,11 +13,13 @@

 class AbstractIoAdapter(ABC):

-    def __init__(self, url: str, stream_endpoint: str, user_start_prompt: str, user_end_prompt: str):
+    def __init__(self, url: str, stream_endpoint: str, user_start_prompt: str, user_end_prompt: str, system_start_prompt: str = '', prompt_end: str = ''):
         self.url = url
         self.stream_endpoint = stream_endpoint
+        self.system_start_prompt = system_start_prompt
         self.user_start_prompt = user_start_prompt
         self.user_end_prompt = user_end_prompt
+        self.prompt_end = prompt_end

     @abstractmethod
     def stream_request(self, request_body: dict, io = None, wait: bool = False) -> str:

@@ -37,9 +39,7 @@ def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict

 class KoboldCppAdapter(AbstractIoAdapter):

-
-
-    def __init__(self, url: str, stream_endpoint: str, data_endpoint: str, user_start_prompt: str, user_end_prompt: str):
+    def __init__(self, url: str, stream_endpoint: str, data_endpoint: str, user_start_prompt: str, user_end_prompt: str, system_start_prompt: str = '', prompt_end: str = ''):
         super().__init__(url, stream_endpoint, user_start_prompt, user_end_prompt)
         self.data_endpoint = data_endpoint
         self.place_context_in_memory = False

@@ -87,6 +87,8 @@ def parse_result(self, result: str) -> str:
         return json.loads(result)['results'][0]['text']

     def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict:
+        if self.system_start_prompt:
+            prompt = self.system_start_prompt + prompt
         if self.user_start_prompt:
             prompt = prompt.replace('[USER_START]', self.user_start_prompt)
         if self.user_end_prompt:

@@ -96,6 +98,8 @@ def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict
             request_body['memory'] = f'<context>{context}</context>'
         else:
             prompt = prompt.replace('<context>{context}</context>', f'<context>{context}</context>')
+        if self.prompt_end:
+            prompt = prompt + self.prompt_end
         request_body['prompt'] = prompt
         return request_body

@@ -143,12 +147,16 @@ def parse_result(self, result: str) -> str:
         raise LlmResponseException("Error parsing result from backend")

     def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict:
+        if self.system_start_prompt:
+            prompt = self.system_start_prompt + prompt
         if self.user_start_prompt:
             prompt = prompt.replace('[USER_START]', self.user_start_prompt)
         if self.user_end_prompt:
             prompt = prompt + self.user_end_prompt
         if context:
             prompt = prompt.replace('<context>{context}</context>', f'<context>{context}</context>')
             #request_body['messages'][0]['content'] = f'<context>{context}</context>'
+        if self.prompt_end:
+            prompt = prompt + self.prompt_end
         request_body['messages'][1]['content'] = prompt
         return request_body
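
Both set_prompt implementations now apply the same assembly order: system prefix first, then the [USER_START] substitution, the user-end suffix, context injection, and the prompt_end suffix last. (As diffed here, KoboldCppAdapter accepts the two new arguments but does not forward them to super().__init__, so for that adapter they appear to remain the empty-string defaults.) A condensed, self-contained sketch of that ordering, with illustrative llama3-style markers that are assumptions rather than values from this commit:

# Condensed sketch of the set_prompt ordering; the marker strings are
# illustrative llama3-style assumptions, not values taken from this commit.
system_start = '<|start_header_id|>system<|end_header_id|>\n\n'
user_start = '<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n'
user_end = '<|eot_id|>'
prompt_end = '<|start_header_id|>assistant<|end_header_id|>\n\n'

def build_prompt(prompt: str, context: str = '') -> str:
    prompt = system_start + prompt                        # 1. system prefix first
    prompt = prompt.replace('[USER_START]', user_start)   # 2. open the user turn
    prompt = prompt + user_end                            # 3. close the user turn
    if context:
        prompt = prompt.replace('{context}', context)     # 4. inject context (simplified)
    return prompt + prompt_end                            # 5. trailing tokens last

print(build_prompt('You are a game keeper.[USER_START]Describe the town.'))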

tale/llm/llm_io.py
Lines changed: 2 additions & 2 deletions

@@ -18,9 +18,9 @@ def __init__(self, config: dict = None, backend_config: dict = None):
             headers['Authorization'] = f"Bearer {backend_config['OPENAI_API_KEY']}"
             self.openai_json_format = json.loads(backend_config['OPENAI_JSON_FORMAT'])
             self.headers = headers
-            self.io_adapter = LlamaCppAdapter(self.url, backend_config['STREAM_ENDPOINT'], config['USER_START'], config['USER_END'])
+            self.io_adapter = LlamaCppAdapter(self.url, backend_config['STREAM_ENDPOINT'], config['USER_START'], config['USER_END'], config.get('SYSTEM_START', ''), config.get('PROMPT_END', ''))
         else:
-            self.io_adapter = KoboldCppAdapter(self.url, backend_config['STREAM_ENDPOINT'], backend_config['DATA_ENDPOINT'], config['USER_START'], config['USER_END'])
+            self.io_adapter = KoboldCppAdapter(self.url, backend_config['STREAM_ENDPOINT'], backend_config['DATA_ENDPOINT'], config['USER_START'], config['USER_END'], config.get('SYSTEM_START', ''), config.get('PROMPT_END', ''))
             self.headers = {}

         self.stream = backend_config['STREAM']
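
Both constructor calls use config.get with an empty-string default, so configs written before this commit keep their previous behavior: an empty system_start_prompt or prompt_end simply skips the corresponding branch in set_prompt. A minimal sketch (the USER_START/USER_END values are hypothetical):

# Minimal sketch of the fallback: keys missing from an older llm_config.yaml
# resolve to '' and disable the new prefix/suffix handling.
config = {'USER_START': '### Instruction:', 'USER_END': '### Response:'}  # hypothetical
system_start = config.get('SYSTEM_START', '')  # '' -> no system prefix is added
prompt_end = config.get('PROMPT_END', '')      # '' -> no trailing tokens are added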

tale/player.py
Lines changed: 1 addition & 2 deletions

@@ -94,8 +94,7 @@ def tell(self, message: str, *, end: bool=False, format: bool=True, evoke: bool=
         msgs = msg.split('\n\n')
         if len(msgs) > 1:
             for msg in msgs:
-                self._output.print(msg, end=end, format=format)
-                self._output.p()
+                self._output.print(msg, end=True, format=format)
         else:
             self._output.print(msg, end=end, format=format)
tests/test_llm_ext.py
Lines changed: 4 additions & 4 deletions

@@ -175,7 +175,7 @@ def test_do_say(self):
         self.npc.do_say(what_happened='something', actor=self.npc2)
         assert(self.npc.sentiments['actor'] == 'kind')
         assert(len(self.npc._observed_events) == 2)
-        assert ["test : Hello there, how can I assist you today?\n"] == self.msg_trace_npc.messages
+        assert ["test : Hello there, how can I assist you today?\n\n"] == self.msg_trace_npc.messages

     @responses.activate
     def test_idle_action(self):

@@ -186,8 +186,8 @@ def test_idle_action(self):
         self.llm_util._character.io_util.response = []
         action = self.npc.idle_action()
         assert(action == 'sits down on a chair')
-        assert(llm_cache.get_events(self.npc2._observed_events) == 'test : sits down on a chair\n')
-        assert ["test : sits down on a chair\n"] == self.msg_trace_npc.messages
+        assert(llm_cache.get_events(self.npc2._observed_events) == 'test : sits down on a chair\n\n')
+        assert ["test : sits down on a chair\n\n"] == self.msg_trace_npc.messages

     @responses.activate
     def test_do_react(self):

@@ -256,7 +256,7 @@ def test_give_action(self):
         self.npc.autonomous_action()
         assert self.npc.search_item('test item', include_location=False) == None
         assert(self.npc2.search_item('test item', include_location=False))
-        assert ["test : Test gives test item to test\n"] == self.msg_trace_npc.messages
+        assert ["test : Test gives test item to test\n\n"] == self.msg_trace_npc.messages

     @responses.activate
     def test_move_action(self):
