Commit b3ec333

Commit message: tests

1 parent 8758fa4 commit b3ec333

File tree: 2 files changed (+35 lines, -35 lines)


tests/utils/test_file_utils.py

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 import pytest
-from file_utils import get_file_name, github_file_url, github_folder_url
+from doc_generator.utils.file_utils import get_file_name, github_file_url, github_folder_url

 def test_get_file_name_with_delimiter():
     assert get_file_name("example.txt") == "example.md"
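The swap from a bare `from file_utils import ...` to the package-qualified path matters because the bare form only resolves when the module's directory has been pushed onto `sys.path` (presumably via a conftest hack); the qualified form resolves anywhere the package is installed. A minimal sketch of the difference, assuming an editable install (`pip install -e .`):

# Before (fragile): only worked with the utils directory injected onto
# sys.path, e.g. sys.path.insert(0, "...") somewhere in a conftest.py.
# from file_utils import get_file_name

# After (robust): resolves through the installed doc_generator package.
from doc_generator.utils.file_utils import get_file_name

assert get_file_name("example.txt") == "example.md"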

tests/utils/test_llm_utils.py

Lines changed: 34 additions & 34 deletions
@@ -2,7 +2,7 @@
 import pytest
 from unittest.mock import patch, MagicMock

-from llm_utils import (
+from doc_generator.utils.llm_utils import (
     get_gemma_chat_model,
     get_llama_chat_model,
     get_openai_chat_model,
@@ -13,8 +13,8 @@
     get_embeddings,
 )
 from doc_generator.types import LLMModelDetails, LLMModels
-from langchain_openai import ChatOpenAI
-from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline, OpenAIEmbeddings
+from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline


 def test_get_gemma_chat_model_with_peft():
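The regrouped imports fix a genuine bug, not a style issue: OpenAIEmbeddings is exported by langchain_openai, not by langchain_huggingface, so the old grouping would fail with an ImportError at collection time. The corrected split, for reference:

# OpenAIEmbeddings ships with the langchain_openai package;
# langchain_huggingface only provides the Hugging Face wrappers.
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline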
@@ -24,12 +24,12 @@ def test_get_gemma_chat_model_with_peft():
         "device": "cpu",
         "peft_model_path": "path/to/peft/model",
     }
-    with patch("llm_utils.hf_hub_download") as mock_hf_download, \
-         patch("llm_utils.get_tokenizer") as mock_get_tokenizer, \
-         patch("llm_utils.AutoModelForCausalLM.from_pretrained") as mock_auto_model, \
-         patch("llm_utils.PeftModel.from_pretrained") as mock_peft_model, \
-         patch("llm_utils.pipeline") as mock_pipeline, \
-         patch("llm_utils.HuggingFacePipeline") as mock_hf_pipeline, \
+    with patch("doc_generator.utils.llm_utils.hf_hub_download") as mock_hf_download, \
+         patch("doc_generator.utils.llm_utils.get_tokenizer") as mock_get_tokenizer, \
+         patch("doc_generator.utils.llm_utils.AutoModelForCausalLM.from_pretrained") as mock_auto_model, \
+         patch("doc_generator.utils.llm_utils.PeftModel.from_pretrained") as mock_peft_model, \
+         patch("doc_generator.utils.llm_utils.pipeline") as mock_pipeline, \
+         patch("doc_generator.utils.llm_utils.HuggingFacePipeline") as mock_hf_pipeline, \
          patch.dict(os.environ, {"HF_TOKEN": "test_token"}):

         mock_tokenizer = MagicMock()
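All of the retargeted patch() calls in this file follow from the same two facts: the module now lives at doc_generator.utils.llm_utils, so the old "llm_utils.X" targets no longer resolve, and unittest.mock requires patching a name in the namespace where it is looked up, not where it is defined. A runnable illustration of that lookup rule, using throwaway demo_* modules (hypothetical names, not from this repo):

import sys
import types
from unittest.mock import patch

# Build two tiny modules in memory: demo_consumer binds `download` at
# import time via `from demo_helpers import download`.
helpers = types.ModuleType("demo_helpers")
helpers.download = lambda: "real"
sys.modules["demo_helpers"] = helpers

consumer = types.ModuleType("demo_consumer")
exec(
    "from demo_helpers import download\n"
    "def fetch():\n"
    "    return download()\n",
    consumer.__dict__,
)
sys.modules["demo_consumer"] = consumer

with patch("demo_helpers.download", return_value="stub"):
    print(consumer.fetch())  # "real": consumer's already-bound name is untouched

with patch("demo_consumer.download", return_value="stub"):
    print(consumer.fetch())  # "stub": patched where the name is looked up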
@@ -71,12 +71,12 @@ def test_get_gemma_chat_model_without_peft():
         "gguf_file": "some_file.gguf",
         "device": "cpu",
     }
-    with patch("llm_utils.hf_hub_download") as mock_hf_download, \
-         patch("llm_utils.get_tokenizer") as mock_get_tokenizer, \
-         patch("llm_utils.AutoModelForCausalLM.from_pretrained") as mock_auto_model, \
-         patch("llm_utils.PeftModel.from_pretrained") as mock_peft_model, \
-         patch("llm_utils.pipeline") as mock_pipeline, \
-         patch("llm_utils.HuggingFacePipeline") as mock_hf_pipeline, \
+    with patch("doc_generator.utils.llm_utils.hf_hub_download") as mock_hf_download, \
+         patch("doc_generator.utils.llm_utils.get_tokenizer") as mock_get_tokenizer, \
+         patch("doc_generator.utils.llm_utils.AutoModelForCausalLM.from_pretrained") as mock_auto_model, \
+         patch("doc_generator.utils.llm_utils.PeftModel.from_pretrained") as mock_peft_model, \
+         patch("doc_generator.utils.llm_utils.pipeline") as mock_pipeline, \
+         patch("doc_generator.utils.llm_utils.HuggingFacePipeline") as mock_hf_pipeline, \
          patch.dict(os.environ, {"HF_TOKEN": "test_token"}):

         mock_tokenizer = MagicMock()
@@ -116,12 +116,12 @@ def test_get_llama_chat_model_with_peft():
         "device": "cpu",
         "peft_model": "path/to/peft/model",
     }
-    with patch("llm_utils.hf_hub_download") as mock_hf_download, \
-         patch("llm_utils.get_tokenizer") as mock_get_tokenizer, \
-         patch("llm_utils.AutoModelForCausalLM.from_pretrained") as mock_auto_model, \
-         patch("llm_utils.PeftModel.from_pretrained") as mock_peft_model, \
-         patch("llm_utils.pipeline") as mock_pipeline, \
-         patch("llm_utils.HuggingFacePipeline") as mock_hf_pipeline, \
+    with patch("doc_generator.utils.llm_utils.hf_hub_download") as mock_hf_download, \
+         patch("doc_generator.utils.llm_utils.get_tokenizer") as mock_get_tokenizer, \
+         patch("doc_generator.utils.llm_utils.AutoModelForCausalLM.from_pretrained") as mock_auto_model, \
+         patch("doc_generator.utils.llm_utils.PeftModel.from_pretrained") as mock_peft_model, \
+         patch("doc_generator.utils.llm_utils.pipeline") as mock_pipeline, \
+         patch("doc_generator.utils.llm_utils.HuggingFacePipeline") as mock_hf_pipeline, \
          patch.dict(os.environ, {"HF_TOKEN": "test_token"}):

         mock_tokenizer = MagicMock()
@@ -165,12 +165,12 @@ def test_get_llama_chat_model_without_peft():
         "gguf_file": "some_file.gguf",
         "device": "cpu",
     }
-    with patch("llm_utils.hf_hub_download") as mock_hf_download, \
-         patch("llm_utils.get_tokenizer") as mock_get_tokenizer, \
-         patch("llm_utils.AutoModelForCausalLM.from_pretrained") as mock_auto_model, \
-         patch("llm_utils.PeftModel.from_pretrained") as mock_peft_model, \
-         patch("llm_utils.pipeline") as mock_pipeline, \
-         patch("llm_utils.HuggingFacePipeline") as mock_hf_pipeline, \
+    with patch("doc_generator.utils.llm_utils.hf_hub_download") as mock_hf_download, \
+         patch("doc_generator.utils.llm_utils.get_tokenizer") as mock_get_tokenizer, \
+         patch("doc_generator.utils.llm_utils.AutoModelForCausalLM.from_pretrained") as mock_auto_model, \
+         patch("doc_generator.utils.llm_utils.PeftModel.from_pretrained") as mock_peft_model, \
+         patch("doc_generator.utils.llm_utils.pipeline") as mock_pipeline, \
+         patch("doc_generator.utils.llm_utils.HuggingFacePipeline") as mock_hf_pipeline, \
          patch.dict(os.environ, {"HF_TOKEN": "test_token"}):

         mock_tokenizer = MagicMock()
@@ -216,7 +216,7 @@ def test_get_openai_chat_model():
     assert isinstance(result, ChatOpenAI)
     assert result.temperature == temperature
     assert result.streaming == streaming
-    assert result.model == model
+    assert result.model_name == model
     assert result.model_kwargs == model_kwargs


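The assertion change tracks langchain_openai's field naming: ChatOpenAI stores the model identifier in the model_name field, with "model" accepted only as a constructor alias, so the instance attribute to assert on is model_name. A small sketch (the dummy key only satisfies client construction; no request is made):

from langchain_openai import ChatOpenAI

# "model" works as an init alias, but the attribute on the instance
# is model_name.
llm = ChatOpenAI(model="gpt-4", temperature=0.2, streaming=True, api_key="sk-test")
assert llm.model_name == "gpt-4"
assert llm.temperature == 0.2
assert llm.streaming is True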
@@ -235,7 +235,7 @@ def test_get_openai_api_key_not_set(monkeypatch):
 def test_get_tokenizer_with_hf_token(monkeypatch):
     model_name = "some-model"
     gguf_file = "some_file.gguf"
-    with patch("llm_utils.AutoTokenizer.from_pretrained") as mock_from_pretrained:
+    with patch("doc_generator.utils.llm_utils.AutoTokenizer.from_pretrained") as mock_from_pretrained:
         mock_tokenizer = MagicMock()
         mock_from_pretrained.return_value = mock_tokenizer

@@ -253,7 +253,7 @@ def test_get_tokenizer_with_hf_token(monkeypatch):
 def test_get_tokenizer_without_hf_token(monkeypatch):
     model_name = "some-model"
     gguf_file = "some_file.gguf"
-    with patch("llm_utils.AutoTokenizer.from_pretrained") as mock_from_pretrained:
+    with patch("doc_generator.utils.llm_utils.AutoTokenizer.from_pretrained") as mock_from_pretrained:
         monkeypatch.delenv("HF_TOKEN", raising=False)
         with pytest.raises(KeyError):
             get_tokenizer(model_name, gguf_file)
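Taken together, the two tokenizer tests pin down a contract: with HF_TOKEN set, get_tokenizer forwards to AutoTokenizer.from_pretrained; without it, the lookup fails with KeyError rather than silently falling back to an anonymous download. A hedged reconstruction of logic consistent with that contract (the real implementation lives in doc_generator.utils.llm_utils and may differ):

import os
from transformers import AutoTokenizer

def get_tokenizer_sketch(model_name: str, gguf_file: str):
    # A hard environment lookup: raises KeyError when HF_TOKEN is unset,
    # which is exactly what test_get_tokenizer_without_hf_token expects.
    token = os.environ["HF_TOKEN"]
    return AutoTokenizer.from_pretrained(model_name, gguf_file=gguf_file, token=token)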
@@ -329,7 +329,7 @@ def test_print_model_details_empty(capsys):
     print_model_details(test_models)
     captured = capsys.readouterr()
     output_lines = captured.out.strip().split("\n")
-    assert output_lines == [""]
+    assert output_lines == ["{'Model': 'Total', 'File Count': 0, 'Succeeded': 0, 'Failed': 0, 'Tokens': 0, 'Cost': 0}"]


 def test_total_index_cost_estimate():
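The new expectation for empty input says something concrete about print_model_details: it always emits a final "Total" summary row, with zeroed counters when there are no models, instead of printing nothing. A hedged reconstruction consistent with that assertion (the per-model accumulation is elided, since those fields are not shown in the diff):

def print_model_details_sketch(models: dict) -> None:
    totals = {"Model": "Total", "File Count": 0, "Succeeded": 0,
              "Failed": 0, "Tokens": 0, "Cost": 0}
    for details in models.values():
        ...  # likely prints one row per model and accumulates into totals
    print(totals)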
@@ -360,7 +360,7 @@ def test_total_index_cost_estimate():
         ),
     }

-    with patch("llm_utils.models", test_models):
+    with patch("doc_generator.utils.llm_utils.models", test_models):
         total_cost = total_index_cost_estimate(None)

         expected_cost = sum(
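Worth noting here: patch() is not limited to callables. This hunk swaps the module-level models mapping for fixture data, and the original binding is restored when the with block exits. A tiny standalone demonstration against a stdlib attribute:

from unittest.mock import patch
import math

with patch("math.pi", 3.0):
    print(math.pi)  # 3.0 while the patch is active
print(math.pi)      # original value restored on exit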
@@ -375,7 +375,7 @@ def test_get_embeddings_llama_model():
     model = "llama-something"
     device = "cpu"

-    with patch("llm_utils.HuggingFaceEmbeddings") as mock_hf_embeddings:
+    with patch("doc_generator.utils.llm_utils.HuggingFaceEmbeddings") as mock_hf_embeddings:
         embeddings = get_embeddings(model, device)
         mock_hf_embeddings.assert_called_once_with(
             model_name="sentence-transformers/all-mpnet-base-v2",
@@ -389,7 +389,7 @@ def test_get_embeddings_non_llama_model():
     model = "gpt-3.5-turbo"
     device = "cpu"

-    with patch("llm_utils.OpenAIEmbeddings") as mock_openai_embeddings:
+    with patch("doc_generator.utils.llm_utils.OpenAIEmbeddings") as mock_openai_embeddings:
         embeddings = get_embeddings(model, device)
         mock_openai_embeddings.assert_called_once_with()
         assert embeddings == mock_openai_embeddings.return_value
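The last two hunks together pin down the dispatch inside get_embeddings: llama-family model names get local sentence-transformers embeddings, anything else falls through to OpenAIEmbeddings() with no arguments. A hedged reconstruction (the llama check and any extra HuggingFaceEmbeddings kwargs are assumptions; the diff truncates the asserted call after model_name):

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import OpenAIEmbeddings

def get_embeddings_sketch(model: str, device: str):
    if "llama" in model.lower():
        # The test asserts at least model_name=...; device presumably
        # feeds the kwargs elided from the diff.
        return HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-mpnet-base-v2",
        )
    return OpenAIEmbeddings()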
