Skip to content

Commit 5d1a42a

Browse files
committed
python sample
1 parent afe925c commit 5d1a42a

File tree

1 file changed

+125
-0
lines changed

1 file changed

+125
-0
lines changed
Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
1+
import argparse
2+
import numpy as np
3+
import openvino_genai as ov_genai
4+
5+
from pathlib import Path
6+
from PIL import Image
7+
from openvino import Tensor
8+
9+
10+
def streamer(subword: str) -> bool:
    """Stream a generated sub-word to stdout as soon as it arrives.

    Args:
        subword: sub-word of the generated text.

    Returns: Return flag corresponds whether generation should be stopped.

    """
    print(subword, end="", flush=True)
    # Returning None is treated the same as
    # openvino_genai.StreamingStatus.RUNNING, i.e. generation continues.
    return None
23+
24+
25+
def read_image(path: str) -> Tensor:
    """Load an image file as RGB pixel data wrapped in an ov.Tensor.

    Args:
        path: The path to the image.

    Returns: the ov.Tensor containing the image.

    """
    # Use a context manager so the underlying file handle is closed
    # deterministically; the original relied on garbage collection.
    with Image.open(path) as pic:
        rgb = pic.convert("RGB")
    image_data = np.array(rgb)
    return Tensor(image_data)
37+
38+
39+
def read_images(path: str) -> list[Tensor]:
    """Read one image file, or every file in a directory, as ov.Tensors."""
    source = Path(path)
    if not source.is_dir():
        # Single image file.
        return [read_image(path)]
    # Directory: read files in a deterministic (sorted) order.
    return [read_image(str(child)) for child in sorted(source.iterdir())]
44+
45+
46+
def parse_lora_pairs(raw):
    """Turn a flat CLI token list into (lora_path, alpha) tuples.

    Raises argparse.ArgumentTypeError when the token count is odd or an
    alpha token is not a valid float.
    """
    if len(raw) % 2:
        raise argparse.ArgumentTypeError(
            "LoRA args must come in pairs: <LORA_SAFETENSORS> <ALPHA> ..."
        )

    # zip(it, it) walks the flat list two tokens at a time.
    tokens = iter(raw)
    pairs = []
    for path, alpha_text in zip(tokens, tokens):
        try:
            pairs.append((path, float(alpha_text)))
        except ValueError as e:
            raise argparse.ArgumentTypeError(
                f"Invalid alpha '{alpha_text}' for LoRA '{path}'"
            ) from e
    return pairs
63+
64+
65+
def main() -> int:
    """Run one VLM prompt twice: with, then without, the given LoRA adapters."""
    parser = argparse.ArgumentParser(
        description="OpenVINO GenAI VLM sample: run with and without LoRA adapters.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument("model_dir", help="Path to model directory")
    parser.add_argument("images_path", help="Image file OR directory with images")
    parser.add_argument("device", choices=["CPU", "GPU"], help='Device, e.g. "CPU", "GPU"')
    parser.add_argument(
        "lora_pairs",
        nargs="+",
        metavar=("LORA", "ALPHA"),
        help="Pairs: <LORA_SAFETENSORS> <ALPHA> ...",
    )

    args = parser.parse_args()
    lora_adapters = parse_lora_pairs(args.lora_pairs)

    images = read_images(args.images_path)

    pipeline_kwargs = {}
    if args.device == "GPU":
        # Cache compiled kernels on disk so repeated GPU runs start faster.
        pipeline_kwargs["ov_config"] = {"CACHE_DIR": "vlm_cache"}

    # Configure LoRA adapters with weights (alphas)
    if lora_adapters:
        adapter_config = ov_genai.AdapterConfig()
        for lora_path, alpha in lora_adapters:
            adapter_config.add(ov_genai.Adapter(lora_path), alpha)
        pipeline_kwargs["adapters"] = adapter_config

    pipe = ov_genai.VLMPipeline(args.model_dir, args.device, **pipeline_kwargs)

    generation_config = pipe.get_generation_config()
    generation_config.max_new_tokens = 100

    prompt = input("question:\n")

    print("----------\nGenerating answer with LoRA adapters applied:\n")
    pipe.generate(
        prompt,
        images=images,
        generation_config=generation_config,
        streamer=streamer,
    )

    print("\n----------\nGenerating answer without LoRA adapters applied:\n")
    # Passing an empty AdapterConfig per-call overrides the pipeline-level
    # adapters, disabling LoRA for this generation only.
    pipe.generate(
        prompt,
        images=images,
        generation_config=generation_config,
        adapters=ov_genai.AdapterConfig(),
        streamer=streamer,
    )

    print("\n----------")
    return 0
122+
123+
124+
if __name__ == "__main__":
    # main() returns a status code; the original discarded it. SystemExit
    # propagates it to the shell without needing an extra import.
    raise SystemExit(main())

0 commit comments

Comments
 (0)