Skip to content

Commit c876b14

Browse files
authored
Bump transformers, peft, and diffusers dependencies (#79)
* Bump `transformers`, `peft`, and `diffusers` dependencies
* Add `sentencepiece` extra within `transformers` dependency
* Remove `huggingface_inference_toolkit.const.HF_TRUST_REMOTE_CODE` unused import
* Set `device_map="balanced"` instead of `"auto"`, as of https://github.com/huggingface/diffusers/blob/65e30907b5df1338ae65a20b78866c87e061c952/src/diffusers/pipelines/pipeline_utils.py#L95C26-L95C34
* Update `author` within `setup.py`
* Add `huggingface_hub` with `hf_transfer` extra as dependency
* Skip `kwargs["tokenizer"]` assignment for `text-to-image`
* Skip `pipeline.to(device)` as it is not required and not compatible with `device_map="balanced"`
1 parent d9ae3d9 commit c876b14

File tree

3 files changed

+9
-14
lines changed

3 files changed

+9
-14
lines changed

setup.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,9 @@
1313
# libavcodec-extra : libavcodec-extra includes additional codecs for ffmpeg
1414

1515
install_requires = [
16-
"transformers[sklearn,sentencepiece,audio,vision]==4.41.1",
17-
"orjson",
16+
"transformers[sklearn,sentencepiece,audio,vision]==4.44.0",
17+
"huggingface_hub[hf_transfer]==0.24.5",
18+
"peft==0.12.0",
1819
# vision
1920
"Pillow",
2021
"librosa",
@@ -26,13 +27,13 @@
2627
"starlette",
2728
"uvicorn",
2829
"pandas",
29-
"peft==0.11.1",
30+
"orjson",
3031
]
3132

3233
extras = {}
3334

3435
extras["st"] = ["sentence_transformers==2.7.0"]
35-
extras["diffusers"] = ["diffusers==0.26.3", "accelerate==0.27.2"]
36+
extras["diffusers"] = ["diffusers==0.30.0", "accelerate==0.33.0"]
3637
extras["torch"] = ["torch==2.2.2", "torchvision", "torchaudio"]
3738
extras["test"] = [
3839
"pytest==7.2.1",
@@ -53,7 +54,7 @@
5354
setup(
5455
name="huggingface-inference-toolkit",
5556
version=VERSION,
56-
author="HuggingFace",
57+
author="Hugging Face",
5758
description="Hugging Face Inference Toolkit is for serving 🤗 Transformers models in containers.",
5859
url="",
5960
package_dir={"": "src"},

src/huggingface_inference_toolkit/diffusers_utils.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def __init__(
2828
dtype = torch.float32
2929
if device == "cuda":
3030
dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float16
31-
device_map = "auto" if device == "cuda" else None
31+
device_map = "balanced" if device == "cuda" else None
3232

3333
self.pipeline = AutoPipelineForText2Image.from_pretrained(
3434
model_dir, torch_dtype=dtype, device_map=device_map, **kwargs
@@ -42,8 +42,6 @@ def __init__(
4242
except Exception:
4343
pass
4444

45-
self.pipeline.to(device)
46-
4745
def __call__(
4846
self,
4947
prompt,

src/huggingface_inference_toolkit/utils.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,7 @@
88
from transformers.file_utils import is_tf_available, is_torch_available
99
from transformers.pipelines import Pipeline
1010

11-
from huggingface_inference_toolkit.const import (
12-
HF_DEFAULT_PIPELINE_NAME,
13-
HF_MODULE_NAME,
14-
HF_TRUST_REMOTE_CODE,
15-
)
11+
from huggingface_inference_toolkit.const import HF_DEFAULT_PIPELINE_NAME, HF_MODULE_NAME
1612
from huggingface_inference_toolkit.diffusers_utils import (
1713
get_diffusers_pipeline,
1814
is_diffusers_available,
@@ -240,7 +236,7 @@ def get_pipeline(
240236
"zero-shot-image-classification",
241237
}:
242238
kwargs["feature_extractor"] = model_dir
243-
elif task in {"image-to-text"}:
239+
elif task in {"image-to-text", "text-to-image"}:
244240
pass
245241
elif task == "conversational":
246242
task = "text-generation"

0 commit comments

Comments (0)