Skip to content

Commit 1e0bc35

Browse files
authored
Merge pull request #1086 from DhruvKadam-git/main
Update chat_models_gguf_fast_start.py
2 parents 6936ef4 + a2edd18 commit 1e0bc35

File tree

2 files changed

+76
-32
lines changed

2 files changed

+76
-32
lines changed

examples/Models/chat_models_gguf_fast_start.py

Lines changed: 28 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,31 +1,47 @@
1-
21
"""This example demonstrates several leading open source chat models running in 4-bit GGUF on local laptop."""
32

43
import time
54
import re
65
from llmware.prompts import Prompt
6+
import logging
77

88

99
# Run the benchmark test
def run_test(model_name: str, prompt_list: list[dict]) -> int:

    """Run the benchmark test on the specified model with the given prompts.

    Args:
        model_name (str): The name of the model to load.
        prompt_list (list[dict]): Prompts to test the model with; each entry
            is a dict with a "query" key.

    Returns:
        int: 0 on success, 1 if the model could not be loaded.
    """

    # NOTE(review): configuring logging inside a function is unusual; kept
    # here to preserve the script's existing behavior.
    logging.basicConfig(level=logging.INFO)

    logging.info("Loading model '%s'", model_name)

    try:
        prompter = Prompt().load_model(model_name)
    except Exception as e:
        logging.error("Failed to load model: %s", e)
        return 1

    for i, entry in enumerate(prompt_list):

        start_time = time.time()
        logging.info("query - %d - %s", i + 1, entry["query"])

        try:
            response = prompter.prompt_main(entry["query"])
        except Exception as e:
            # skip this prompt but keep benchmarking the rest
            logging.error("Error during prompting: %s", e)
            continue

        # Print results
        time_taken = round(time.time() - start_time, 2)

        # BUG FIX: the previous pattern "[\n\n]" is a character class equal
        # to a single "\n", so substituting "\n" for it was a no-op; collapse
        # runs of newlines into one newline as originally intended.
        llm_response = re.sub(r"\n+", "\n", response["llm_response"])

        logging.info("llm_response - %d - %s", i + 1, llm_response)
        logging.info("time_taken - %d - %s", i + 1, time_taken)

    return 0

llmware/graph.py

Lines changed: 48 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
21
# Copyright 2023-2024 llmware
32

43
# Licensed under the Apache License, Version 2.0 (the "License"); you
@@ -65,14 +64,22 @@ def __init__(self, library):
6564
self.pre_initialization_bow_data = {}
6665
self.post_initialization_bow_data = {}
6766

68-
# create stop words txt file in nlp path
69-
self.stop_words = Utilities().load_stop_words_list(self.library.nlp_path)
67+
# Load stop words with error handling
68+
try:
69+
self.stop_words = Utilities().load_stop_words_list(self.library.nlp_path)
70+
except Exception as e:
71+
logger.error(f"Failed to load stop words: {e}")
72+
self.stop_words = []
7073

71-
# load graph c modules - note: if any issues loading module, will be captured in get_module_graph_functions()
72-
self._mod_utility = Utilities().get_module_graph_functions()
74+
# Load graph C modules with error handling
75+
try:
76+
self._mod_utility = Utilities().get_module_graph_functions()
77+
except Exception as e:
78+
logger.error(f"Failed to load graph utility module: {e}")
79+
self._mod_utility = None
7380

7481
# new method - used to track 'counter' inside the bow files for incremental read/write/analysis
75-
def bow_locator(self):
82+
def bow_locator(self) -> tuple:
7683

7784
""" Internal utility method used to enable scalability across multiple underlying BOW (Bag-of-Word)
7885
files which are created by the graph module. """
@@ -103,16 +110,26 @@ def bow_locator(self):
103110
f"{top_bow_file}")
104111
bow_index = 0
105112

106-
fp = open(os.path.join(dataset_fp, top_bow_file), "r", encoding='utf-8')
107-
fp.seek(0, 2)
108-
bow_byte_index = fp.tell()
109-
fp.seek(0, 0) # rewind
110-
bow_tokens = len(fp.read().split(","))
111-
fp.close()
113+
try:
114+
fp = open(os.path.join(dataset_fp, top_bow_file), "r", encoding='utf-8')
115+
fp.seek(0, 2)
116+
bow_byte_index = fp.tell()
117+
fp.seek(0, 0) # rewind
118+
bow_tokens = len(fp.read().split(","))
119+
fp.close()
120+
except FileNotFoundError:
121+
logger.error(f"BOW file not found: {top_bow_file}")
122+
return 0, 0, 0, [], True
123+
except Exception as e:
124+
logger.error(f"Error reading BOW file: {e}")
125+
return 0, 0, 0, [], True
126+
finally:
127+
if 'fp' in locals():
128+
fp.close()
112129

113130
return bow_index, bow_byte_index, bow_tokens, bow_files, no_bow
114131

115-
def build_graph(self):
132+
def build_graph(self) -> dict:
116133

117134
""" Generates multiple valuable nlp artifacts in the library's /nlp folder path, with the
118135
primary objective of generating the co-occurrence matrix. """
@@ -186,9 +203,11 @@ def build_graph(self):
186203
graph_summary.update({"time_stamp": ts})
187204

188205
# write to manifest.json for knowledge graph
189-
json_dict = json.dumps(graph_summary,indent=2)
190-
with open(os.path.join(self.library.nlp_path,"manifest.json"),"w", encoding='utf-8') as outfile:
191-
outfile.write(json_dict)
206+
try:
207+
with open(os.path.join(self.library.nlp_path,"manifest.json"), "w", encoding='utf-8') as outfile:
208+
outfile.write(json.dumps(graph_summary, indent=2))
209+
except Exception as e:
210+
logger.error(f"Failed to write manifest.json: {e}")
192211

193212
return graph_summary
194213

@@ -833,16 +852,25 @@ def get_unique_vocab_len(self):
833852

834853
return len(self.get_unique_vocab_lookup())
835854

def get_unique_vocab_lookup(self) -> dict:

    """ Returns the unique vocab lookup dict found in the Library corpus.

    Builds the knowledge graph on demand if it has not been created yet.
    Returns an empty dict if vocab_lookup.json is missing or unreadable,
    logging the failure instead of raising.
    """

    # build the graph first if the library has no knowledge graph yet
    if self.library.get_knowledge_graph_status() != "yes":
        self.build_graph()

    vocab_fp = os.path.join(self.library.nlp_path, "vocab_lookup.json")

    try:
        with open(vocab_fp, "r", encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError:
        logger.error("vocab_lookup.json file not found.")
        return {}
    except json.JSONDecodeError:
        logger.error("Error decoding JSON from vocab_lookup.json.")
        return {}
    except Exception as e:
        # lazy %-style args instead of an f-string in the logging call
        logger.error("Unexpected error: %s", e)
        return {}

847875
def get_unique_vocab_reverse_lookup(self):
848876

0 commit comments

Comments
 (0)