Skip to content

Commit dad7ca6

Browse files
committed
release v0.1.3
Former-commit-id: 62c68bcbf591516e8f90b47810bea6f710fd23f6
1 parent a146813 commit dad7ca6

4 files changed

Lines changed: 11 additions & 8 deletions

File tree

requirements.txt

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1,8 +1,8 @@
11
torch>=1.13.1
22
transformers>=4.29.1
33
datasets>=2.12.0
4-
accelerate>=0.19.0
5-
peft>=0.3.0
4+
accelerate>=0.21.0
5+
peft>=0.4.0
66
trl>=0.4.7
77
sentencepiece
88
jieba

src/llmtuner/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,4 @@
11
from llmtuner.chat import ChatModel
22

33

4-
__version__ = "0.1.2"
4+
__version__ = "0.1.3"

src/llmtuner/tuner/core/loader.py

Lines changed: 2 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -27,8 +27,8 @@
2727

2828
check_min_version("4.29.1")
2929
require_version("datasets>=2.12.0", "To fix: pip install datasets>=2.12.0")
30-
require_version("accelerate>=0.19.0", "To fix: pip install accelerate>=0.19.0")
31-
require_version("peft>=0.3.0", "To fix: pip install peft>=0.3.0")
30+
require_version("accelerate>=0.21.0", "To fix: pip install accelerate>=0.21.0")
31+
require_version("peft>=0.4.0", "To fix: pip install peft>=0.4.0")
3232
require_version("trl>=0.4.7", "To fix: pip install trl>=0.4.7")
3333

3434

@@ -81,9 +81,6 @@ def load_model_and_tokenizer(
8181

8282
elif model_args.quantization_bit == 4:
8383
require_version("bitsandbytes>=0.39.0", "To fix: pip install bitsandbytes>=0.39.0")
84-
require_version("transformers>=4.30.1", "To fix: pip install transformers>=4.30.1")
85-
require_version("accelerate>=0.20.3", "To fix: pip install accelerate>=0.20.3")
86-
require_version("peft>=0.4.0.dev0", "To fix: pip install git+https://github.com/huggingface/peft.git")
8784
config_kwargs["load_in_4bit"] = True
8885
config_kwargs["quantization_config"] = BitsAndBytesConfig(
8986
load_in_4bit=True,

src/llmtuner/webui/chat.py

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -84,6 +84,12 @@ def predict(
8484
query, history, prefix, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature
8585
):
8686
response += new_text
87+
response = self.postprocess(response)
8788
new_history = history + [(query, response)]
8889
chatbot[-1] = [query, response]
8990
yield chatbot, new_history
91+
92+
def postprocess(self, response: str) -> str:
93+
response = response.replace("<", "&lt;")
94+
response = response.replace(">", "&gt;")
95+
return response

0 commit comments

Comments (0)