support batch infer in vllm
@@ -1,2 +1,3 @@
 model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
 template: qwen2_vl
+infer_backend: huggingface # choices: [huggingface, vllm]
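For context, batch inference with the vLLM backend ultimately amounts to passing a list of prompts to vLLM's LLM.generate, which schedules and decodes them together. Below is a minimal sketch using vLLM's Python API directly, assuming the model name from the config above and illustrative text-only prompts; it is not this repository's own wrapper script and it omits the image inputs a Qwen2-VL chat template would normally carry.

from vllm import LLM, SamplingParams

# Load the model named in the config above with the vLLM engine.
llm = LLM(model="Qwen/Qwen2-VL-7B-Instruct")

# Batch inference: hand vLLM all prompts at once so it can batch them internally.
prompts = [
    "Describe the image in one sentence.",  # illustrative prompts only
    "List the objects you can see.",
]
params = SamplingParams(temperature=0.7, max_tokens=128)

outputs = llm.generate(prompts, params)
for out in outputs:
    print(out.outputs[0].text)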