Update app.py
app.py CHANGED
@@ -15,33 +15,33 @@ from huggingface_hub import hf_hub_download, list_repo_files
 # Define constants
 DESCRIPTION = "[UGround Demo](https://osu-nlp-group.github.io/UGround/)"
 _SYSTEM = "You are a very helpful assistant."
-MIN_PIXELS =
-MAX_PIXELS =
+MIN_PIXELS = 802816
+MAX_PIXELS = 1806336

 # Specify the model repository and destination folder
 # https://huggingface.co/osunlp/UGround-V1-2B
 model_repo = "osunlp/UGround-V1-2B"
 destination_folder = "./UGround-V1-2B"
-
-# Ensure the destination folder exists
-os.makedirs(destination_folder, exist_ok=True)
-
-# List all files in the repository
-files = list_repo_files(repo_id=model_repo)
-
-# Download each file to the destination folder
-for file in files:
-
-
+#
+# # Ensure the destination folder exists
+# os.makedirs(destination_folder, exist_ok=True)
+#
+# # List all files in the repository
+# files = list_repo_files(repo_id=model_repo)
+#
+# # Download each file to the destination folder
+# for file in files:
+#     file_path = hf_hub_download(repo_id=model_repo, filename=file, local_dir=destination_folder)
+#     print(f"Downloaded {file} to {file_path}")

 model = Qwen2VLForConditionalGeneration.from_pretrained(
-
+    model_repo,
     torch_dtype=torch.bfloat16,
     device_map="cpu",
 )

 # Load the processor
-processor = AutoProcessor.from_pretrained(
+processor = AutoProcessor.from_pretrained(model_repo, min_pixels=MIN_PIXELS, max_pixels=MAX_PIXELS)

 # Helper functions
 def draw_point(image_input, point=None, radius=5):
@@ -134,7 +134,7 @@ def run_showui(image, query, session_id, iterations=1):
 )[0]

 click_xy = ast.literal_eval(output_text)
-
+

 # Draw point on the current image
 result_image = draw_point(image_path, click_xy, radius=10)
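Put together, the added lines amount to the setup sketched below: the app now loads UGround-V1-2B straight from the Hub repo id instead of a pre-downloaded local folder (the hf_hub_download loop stays commented out), and the processor is given explicit pixel bounds. This is a minimal, self-contained reconstruction, assuming the torch/transformers imports that sit at the top of app.py; the comments on the pixel constants are an interpretation based on Qwen2-VL's 28x28-pixel patch convention, not text from the commit itself.

# Minimal sketch of the post-commit setup (assumes torch and a transformers
# release with Qwen2-VL support are installed).
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor

MIN_PIXELS = 802816   # 1024 * 28 * 28 - lower bound on image area kept by the processor
MAX_PIXELS = 1806336  # 2304 * 28 * 28 - upper bound on image area kept by the processor

model_repo = "osunlp/UGround-V1-2B"

# Weights are pulled directly from the Hub repo id; no local copy is prepared first.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_repo,
    torch_dtype=torch.bfloat16,
    device_map="cpu",
)

# The processor resizes incoming screenshots so their pixel count stays within
# [MIN_PIXELS, MAX_PIXELS] before they are split into visual patches.
processor = AutoProcessor.from_pretrained(model_repo, min_pixels=MIN_PIXELS, max_pixels=MAX_PIXELS)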