import os

# Change the default ModelScope cache path. Use a raw string so the
# backslashes in the Windows path are not treated as escape sequences, and
# set the variable before importing modelscope, since some versions read it
# at import time.
os.environ['MODELSCOPE_CACHE'] = r'D:\dev-venv\modelscope'

import numpy
import torch
from modelscope import snapshot_download
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

print('torch version:', torch.__version__)
print('numpy version:', numpy.__version__)
print(f"Current device: {torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'CPU'}")
# Download the model from the ModelScope hub (cached after the first run)
model_dir = snapshot_download('Qwen/Qwen2.5-VL-7B-Instruct')
# default: Load the model on the available device(s)
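# device_map="auto" lets the weights be sharded across the available GPU(s)
# (falling back to CPU), and bfloat16 roughly halves memory use vs. float32.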
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_dir, torch_dtype=torch.bfloat16, device_map="auto"
)
# max_pixels caps the per-image visual token budget; the official Qwen2.5-VL
# examples typically use 1280 * 28 * 28, so 1028 here may be a typo.
processor = AutoProcessor.from_pretrained(model_dir, max_pixels=1028 * 28 * 28)
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "C:/Users/33506/Desktop/1740100328531.jpg",
                # Note: "description" is not part of the standard Qwen2.5-VL
                # message schema; process_vision_info ignores unknown keys, so
                # the model never sees this text.
                "description": "A photo of a reservoir spillway inlet",
            },
            {
                "type": "text",
                "text": "Determine whether anyone in the frame is fishing or swimming",
            },
        ],
    }
]
# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
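# process_vision_info extracts the image/video references from the messages,
# loads them, and resizes them to fit the processor's pixel constraints.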
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
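# Move every input tensor (input_ids, attention_mask, pixel_values,
# image_grid_thw) onto the same device as the model weights.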
inputs = inputs.to(model.device)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=1000)
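# Strip the prompt tokens from each sequence so only the newly generated
# answer tokens are decoded.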
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
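# Decode token ids back to text; skip_special_tokens drops chat-template
# control tokens such as <|im_end|>.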
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
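# A minimal follow-up sketch (assumptions: a single-image batch and an
# English answer, since the prompt above is in English). Keyword matching on
# free-form model output is crude; prompting for a structured "yes"/"no"
# answer would be more reliable.
answer = output_text[0]
if any(keyword in answer.lower() for keyword in ('fishing', 'swimming', 'yes')):
    print('Possible person detected near the spillway; flag for review.')
else:
    print('No fishing or swimming detected.')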