    with model.generate(
        prompt_input_vllm,
        max_new_tokens=2048,
        temperature=0.0,
        top_p=1.0,
    ) as generator:
        outputs = model.generator.output.save()

This fails, reporting that there is no `generate` function.
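For reference, the snippet above follows the standard nnsight `LanguageModel` generate-tracing pattern, shown as a minimal sketch below (the model name, prompt, and generation length are placeholders, not the values from the original script). The error suggests the vLLM-backed model does not expose the same `generate` tracing entry point.

```python
from nnsight import LanguageModel

# Placeholder model and prompt for illustration only.
model = LanguageModel("openai-community/gpt2", device_map="auto")
prompt = "The Eiffel Tower is in the city of"

# The generate context traces a full generation pass;
# model.generator.output holds the generated token ids.
with model.generate(prompt, max_new_tokens=5):
    outputs = model.generator.output.save()

# Depending on the nnsight version, the saved proxy may need to be
# read as outputs.value after the context exits.
print(model.tokenizer.batch_decode(outputs))
```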