h-siyuan committed on
Commit
22ed136
·
verified ·
1 Parent(s): c181606

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -14
app.py CHANGED
@@ -34,21 +34,14 @@ for file in files:
34
  file_path = hf_hub_download(repo_id=model_repo, filename=file, local_dir=destination_folder)
35
  print(f"Downloaded {file} to {file_path}")
36
 
37
- @spaces.GPU
38
- def get_model_processor():
39
- # Load the model
40
- model = Qwen2VLForConditionalGeneration.from_pretrained(
41
- "./showui-2b",
42
- torch_dtype=torch.bfloat16,
43
- device_map="cuda",
44
- )
45
-
46
- # Load the processor
47
- processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", min_pixels=MIN_PIXELS, max_pixels=MAX_PIXELS)
48
 
49
- return model, processor
50
-
51
- model, processor = get_model_processor()
52
 
53
  # Helper functions
54
  def draw_point(image_input, point=None, radius=5):
@@ -90,6 +83,10 @@ def run_showui(image, query):
90
  ]
91
 
92
  # Prepare inputs for the model
 
 
 
 
93
  text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
94
  image_inputs, video_inputs = process_vision_info(messages)
95
  inputs = processor(
 
34
  file_path = hf_hub_download(repo_id=model_repo, filename=file, local_dir=destination_folder)
35
  print(f"Downloaded {file} to {file_path}")
36
 
37
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
38
+ "./showui-2b",
39
+ torch_dtype=torch.bfloat16,
40
+ device_map="cpu",
41
+ )
 
 
 
 
 
 
42
 
43
+ # Load the processor
44
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", min_pixels=MIN_PIXELS, max_pixels=MAX_PIXELS)
 
45
 
46
  # Helper functions
47
  def draw_point(image_input, point=None, radius=5):
 
83
  ]
84
 
85
  # Prepare inputs for the model
86
+
87
+ model = model.to("cuda")
88
+ processor = processor.to("cuda")
89
+
90
  text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
91
  image_inputs, video_inputs = process_vision_info(messages)
92
  inputs = processor(