Spaces:
Running
Running
peterbonnesoeur
committed on
Commit
·
c7690a1
1
Parent(s):
7391c29
Updated: Rework of the code
Browse files - New method to call the model to make the calls more efficient
- Solved the memory issue with several models loading at the same time
- Added animal pose detection
- Added white overlay option to focus more on the keypoint sets
app.py
CHANGED
@@ -1,49 +1,61 @@
|
|
1 |
-
|
2 |
-
import torch
|
3 |
import gradio as gr
|
4 |
-
import openpifpaf
|
5 |
-
import numpy as np
|
6 |
|
7 |
-
|
8 |
-
predictor_whole_body = openpifpaf.Predictor(checkpoint='shufflenetv2k30-wholebody')
|
9 |
-
predictor_vehicle = openpifpaf.Predictor(checkpoint='shufflenetv2k16-apollo-24')
|
10 |
|
|
|
|
|
|
|
|
|
11 |
|
12 |
-
def inference(img, ver):
|
13 |
if ver == 'pose':
|
14 |
-
|
15 |
elif ver == 'whole-body':
|
16 |
-
|
|
|
17 |
elif ver == 'vehicles':
|
18 |
-
|
|
|
|
|
|
|
19 |
else:
|
20 |
raise ValueError('invalid version')
|
21 |
|
22 |
-
|
23 |
-
annotation_painter = openpifpaf.show.AnnotationPainter()
|
24 |
-
with openpifpaf.show.image_canvas(img, fig_file = "test.jpg") as ax:
|
25 |
-
annotation_painter.annotations(ax, predictions)
|
26 |
-
|
27 |
-
out = Image.open("test.jpg")
|
28 |
-
return out
|
29 |
|
30 |
|
31 |
-
title = "Openpifpaf"
|
32 |
-
description = "Gradio demo for openpifpaf. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below and don't hesitate to SMASH THAT LIKE BUTTON (
|
33 |
article = "<p style='text-align: center'><a href='https://github.com/openpifpaf/openpifpaf' target='_blank'>Github Repo Openpifpaf</a> | <a href='https://github.com/peterbonnesoeur' target='_blank'>Github Repo peterbonnesoeur</a></p>"
|
34 |
|
35 |
with open("article.html", "r", encoding='utf-8') as f:
|
36 |
article= f.read()
|
37 |
|
38 |
examples=[
|
39 |
-
['
|
40 |
-
['crowd.jpg','pose'],
|
41 |
['bill.png','whole-body'],
|
42 |
['billie.png','whole-body'],
|
43 |
-
['
|
|
|
|
|
|
|
|
|
44 |
['india-vehicles.jpeg', 'vehicles'],
|
45 |
['russia-vehicles.jpg', 'vehicles'],
|
46 |
['paris-vehicles.jpg', 'vehicles'],
|
|
|
47 |
]
|
48 |
-
|
49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import subprocess
import sys

import gradio as gr
|
|
|
|
|
3 |
|
4 |
+
def inference(img, ver, white_overlay):
    """Run OpenPifPaf pose estimation on an uploaded image via its CLI.

    A fresh predictor process is spawned per request so only one model
    checkpoint is resident in memory at a time.

    Parameters
    ----------
    img :
        Uploaded file object from gradio; ``img.name`` is its path on disk.
    ver : str
        One of ``'pose'``, ``'whole-body'``, ``'vehicles'`` or ``'animal'``;
        selects the checkpoint and drawing options.
    white_overlay : bool
        When True, fade the photo under the predictions so the keypoint
        sets stand out.

    Returns
    -------
    str
        Path of the annotated output image (``"out.jpg"``).

    Raises
    ------
    ValueError
        If ``ver`` is not a supported version.
    """
    # Optional CLI flag: whiten the image under the drawn annotations.
    overlay_args = ["--white-overlay=0.3"] if white_overlay else []

    # Per-version checkpoint and drawing options for `openpifpaf.predict`.
    version_args = {
        'pose': ["--checkpoint=shufflenetv2k30", "--line-width=4"],
        'whole-body': ["--checkpoint=shufflenetv2k30-wholebody",
                       "--instance-threshold", "0.05",
                       "--seed-threshold", "0.05",
                       "--line-width", "3"],
        'vehicles': ["--checkpoint=shufflenetv2k16-apollo-24",
                     "--line-width=5"],
        'animal': ["--checkpoint=shufflenetv2k30-animalpose",
                   "--line-width=5", "--font-size=6", "--long-edge=500"],
    }
    if ver not in version_args:
        raise ValueError('invalid version')

    # Argument list + shell=False avoids shell injection through the
    # uploaded file's name, unlike the previous os.system() string
    # concatenation. sys.executable guarantees the same interpreter.
    subprocess.run(
        [sys.executable, "-m", "openpifpaf.predict", img.name]
        + version_args[ver] + overlay_args + ["-o", "out.jpg"],
        check=False,  # preserve the best-effort behavior of os.system
    )
    return "out.jpg"
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
|
27 |
+
# --- UI copy shown around the demo -----------------------------------------
title = "Openpifpaf - pose estimation"
description = "Gradio demo for openpifpaf. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below and don't hesitate to SMASH THAT LIKE BUTTON (and you do not have a dislike there either so...)"
article = "<p style='text-align: center'><a href='https://github.com/openpifpaf/openpifpaf' target='_blank'>Github Repo Openpifpaf</a> | <a href='https://github.com/peterbonnesoeur' target='_blank'>Github Repo peterbonnesoeur</a></p>"

# The inline HTML above is immediately replaced by the richer article page
# shipped alongside the app.
with open("article.html", "r", encoding='utf-8') as f:
    article = f.read()

# (image, version) pairs preloaded as clickable examples in the UI.
examples = [
    ['basketball.jpg', 'whole-body'],
    ['bill.png', 'whole-body'],
    ['billie.png', 'whole-body'],
    ['meeting.jpeg', 'pose'],
    ['crowd.jpg', 'pose'],
    ['dalmatian.jpg', 'animal'],
    ['tappo_loomo.jpg', 'animal'],
    ['cow.jpg', 'animal'],
    ['india-vehicles.jpeg', 'vehicles'],
    ['russia-vehicles.jpg', 'vehicles'],
    ['paris-vehicles.jpg', 'vehicles'],
]

# Input widgets: the image to annotate, which model family to run, and
# whether to fade the photo under the keypoint overlay.
demo_inputs = [
    gr.inputs.Image(type="file", label="Input"),
    gr.inputs.Radio(['whole-body', 'pose', 'vehicles', 'animal'],
                    type="value", default='whole-body', label='version'),
    gr.inputs.Checkbox(default=False, label="White overlay"),
]
demo_output = gr.outputs.Image(type="file", label="Output")

gr.Interface(
    inference,
    demo_inputs,
    demo_output,
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
).launch()
|