-
Notifications
You must be signed in to change notification settings - Fork 3.4k
Expand file tree
/
Copy pathtest_diffusion_agent_loop.py
More file actions
131 lines (111 loc) · 4.88 KB
/
test_diffusion_agent_loop.py
File metadata and controls
131 lines (111 loc) · 4.88 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import ray
from omegaconf import DictConfig
from PIL import Image
from verl.experimental.agent_loop.diffusion_agent_loop import DiffusionAgentLoopManager
from verl.protocol import DataProto
@pytest.fixture
def init_config() -> DictConfig:
    """Compose the PPO diffusion-trainer config used by the agent-loop test.

    Loads the base ``ppo_diffusion_trainer`` Hydra config and overrides the
    model path, rollout engine settings, dataset class, reward manager, and
    prompt-length budget for a small local Qwen-Image run.
    """
    from hydra import compose, initialize_config_dir

    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        config = compose(config_name="ppo_diffusion_trainer")

        model_path = os.path.expanduser("~/models/Qwen/Qwen-Image")
        config.actor_rollout_ref.model.path = model_path
        config.actor_rollout_ref.model.tokenizer_path = os.path.join(model_path, "tokenizer")

        # Rollout engine: async vLLM-omni with a custom diffusion pipeline.
        rollout = config.actor_rollout_ref.rollout
        rollout.name = "vllm_omni"
        rollout.mode = "async"
        rollout.enforce_eager = True
        rollout.n = 4
        rollout.num_inference_steps = 10
        rollout.guidance_scale = 1.0
        rollout.agent.num_workers = 2
        rollout.skip_tokenizer_init = True
        rollout.agent.default_agent_loop = "diffusion_single_turn_agent"
        rollout.sde_window_size = 3
        rollout.sde_window_range = [0, 5]
        qwen_pipeline = "verl.workers.utils.vllm_omni_patch.pipelines.pipeline_qwenimage.QwenImagePipelineWithLogProb"
        rollout.engine_kwargs.vllm_omni = {"custom_pipeline": qwen_pipeline}

        # Dataset, reward, and cluster size.
        config.data.custom_cls.path = "verl/utils/dataset/qwen_dataset.py"
        config.data.custom_cls.name = "QwenDataset"
        config.reward.reward_manager.name = "diffusion"
        config.trainer.n_gpus_per_node = 4

        # Prompt budget: tokenizer limit plus the chat-template prefix length.
        tokenizer_max_length = 1024
        prompt_template_encode_start_idx = 34
        max_length = tokenizer_max_length + prompt_template_encode_start_idx
        config.data.apply_chat_template_kwargs = dict(max_length=max_length, padding=True, truncation=True)
        config.data.max_prompt_length = max_length
        rollout.max_model_len = max_length

        # TODO (mike): test with TP later
        rollout.tensor_model_parallel_size = 1
        return config
def test_single_turn(init_config):
    """End-to-end check of ``DiffusionAgentLoopManager.generate_sequences``.

    Submits a small batch of text-to-image chat prompts through the async
    diffusion agent loop and verifies the result size, the presence of the
    expected tensor keys, and the per-sample turn count.

    Args:
        init_config: Hydra config produced by the ``init_config`` fixture.
    """
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
            }
        }
    )
    # BUG FIX: the original called ray.shutdown() only on the success path,
    # so a failing assertion leaked the Ray cluster into subsequent tests.
    try:
        agent_loop_manager = DiffusionAgentLoopManager(init_config)
        system_prompt = (
            "Describe the image by detailing the color, shape, size, texture, quantity, text, "
            "spatial relationships of the objects and background:"
        )
        user_prompts = ["A photo of cute cat with long fur and big eyes.", "A photo of cute dog with short hair."]
        raw_prompts = [
            [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ]
            for user_prompt in user_prompts
        ]
        batch = DataProto(
            non_tensor_batch={
                "raw_prompt": np.array(raw_prompts),
                "data_source": np.array(["jpeg_compressibility"] * len(raw_prompts)),
                "reward_model": np.array([{"style": "rule", "ground_truth": ""}] * len(raw_prompts)),
            },
        )
        # Each prompt is sampled n times by the rollout engine.
        n = init_config.actor_rollout_ref.rollout.n
        batch = batch.repeat(n)
        result = agent_loop_manager.generate_sequences(prompts=batch)
        assert len(result) == len(raw_prompts) * n

        expected_batch_keys = [
            "responses",
            "all_latents",
            "all_timesteps",
            "prompt_embeds",
            "prompt_embeds_mask",
            "input_ids",
            "attention_mask",
        ]
        for key in expected_batch_keys:
            assert key in result.batch, f"Key {key} not found in result batch."

        # check turns
        num_turns = result.non_tensor_batch["__num_turns__"]
        assert np.all(num_turns == 2)

        # TODO: for visualization, drop later
        # assumes responses are float images in [0, 1], NCHW — TODO confirm
        images_pil = (result.batch["responses"].permute(0, 2, 3, 1).numpy() * 255.0).astype("uint8")
        for i, image in enumerate(images_pil):
            # BUG FIX: os.path.join() with a single component was a no-op.
            Image.fromarray(image).save(f"{i}.jpg")
        print("Test passed!")
    finally:
        ray.shutdown()