@@ -30,7 +30,7 @@ def test_rl_dataset():
     from verl.utils import hf_tokenizer
     from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn
 
-    tokenizer = hf_tokenizer("deepseek-ai/deepseek-coder-1.3b-instruct")
+    tokenizer = hf_tokenizer(os.path.expanduser("~/models/deepseek-ai/deepseek-coder-1.3b-instruct"))
     local_path = get_gsm8k_data()
     config = OmegaConf.create(
         {
@@ -70,7 +70,7 @@ def test_rl_dataset_with_max_samples():
     from verl.utils import hf_tokenizer
     from verl.utils.dataset.rl_dataset import RLHFDataset
 
-    tokenizer = hf_tokenizer("deepseek-ai/deepseek-coder-1.3b-instruct")
+    tokenizer = hf_tokenizer(os.path.expanduser("~/models/deepseek-ai/deepseek-coder-1.3b-instruct"))
     local_path = get_gsm8k_data()
     config = OmegaConf.create(
         {
@@ -89,8 +89,8 @@ def test_image_rl_data():
     from verl.utils import hf_processor, hf_tokenizer
     from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn
 
-    tokenizer = hf_tokenizer("Qwen/Qwen2-VL-2B-Instruct")
-    processor = hf_processor("Qwen/Qwen2-VL-2B-Instruct")
+    tokenizer = hf_tokenizer(os.path.expanduser("~/models/Qwen/Qwen2-VL-2B-Instruct"))
+    processor = hf_processor(os.path.expanduser("~/models/Qwen/Qwen2-VL-2B-Instruct"))
     config = OmegaConf.create(
         {
             "prompt_key": "prompt",
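Note: after this change the tests assume the model checkpoints are already mirrored locally under ~/models/<repo_id>, rather than being fetched from the Hugging Face Hub by name. A minimal sketch of preparing that layout is below; it assumes huggingface_hub is installed and network access is available, and the ~/models/ location simply mirrors the repo IDs used in the diff above.

import os
from huggingface_hub import snapshot_download

# Download each repo into ~/models/<repo_id> so the expanduser() paths
# used by the tests resolve to a populated local directory.
for repo_id in [
    "deepseek-ai/deepseek-coder-1.3b-instruct",
    "Qwen/Qwen2-VL-2B-Instruct",
]:
    snapshot_download(
        repo_id=repo_id,
        local_dir=os.path.expanduser(f"~/models/{repo_id}"),
    )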