Spaces:
Running
Running
File size: 2,424 Bytes
c7690a1 9369100 c7690a1 9369100 b3c78e6 b29351f b3c78e6 b29351f 9369100 b29351f c7690a1 b29351f 9369100 c7690a1 2ab2985 411e32e c7690a1 9369100 b3c78e6 2ab2985 c7690a1 b3c78e6 2ab2985 c7690a1 2ab2985 b29351f c6ecdf4 c7690a1 2ab2985 c7690a1 f78e85a 2ab2985 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
import os
import subprocess
import sys

import gradio as gr
def inference(img, ver, white_overlay):
    """Run openpifpaf prediction on an image and return the rendered result path.

    Args:
        img: Path to the input image file (user-supplied via the Gradio UI).
        ver: Model variant: 'pose', 'whole-body', 'vehicles' or 'animal'.
        white_overlay: When truthy, blend a 0.3 white overlay under the drawing.

    Returns:
        Path of the rendered output image, always "out.jpg".

    Raises:
        ValueError: If *ver* is not one of the known model variants.
    """
    # Per-variant CLI options: checkpoint selection plus drawing parameters.
    variant_args = {
        'pose': ["--checkpoint=shufflenetv2k30", "--line-width=4"],
        'whole-body': ["--checkpoint=shufflenetv2k30-wholebody",
                       "--instance-threshold", "0.05",
                       "--seed-threshold", "0.05",
                       "--line-width", "3"],
        'vehicles': ["--checkpoint=shufflenetv2k16-apollo-24", "--line-width=5"],
        'animal': ["--checkpoint=shufflenetv2k30-animalpose", "--line-width=5",
                   "--font-size=6", "--long-edge=500"],
    }
    if ver not in variant_args:
        raise ValueError('invalid version')

    # Build the command as an argument list and run without a shell: the
    # original os.system f-string interpolated the user-controlled image path
    # directly into a shell command (shell-injection risk). sys.executable
    # guarantees the same interpreter that runs this app.
    cmd = [sys.executable, "-m", "openpifpaf.predict", img, *variant_args[ver]]
    if white_overlay:
        cmd.append("--white-overlay=0.3")
    cmd += ["-o", "out.jpg"]
    # check=False mirrors os.system, which also ignored the exit status.
    subprocess.run(cmd, check=False)
    return "out.jpg"
# Static text shown around the Gradio demo UI.
title = "Openpifpaf - pose estimation for human, vehicles and animals"
description = "Gradio demo for openpifpaf. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below and don't hesitate to SMASH THAT LIKE BUTTON (and you do not have a dislike there either so...)"
# Inline fallback footer; replaced by article.html below when that file exists.
article = "<p style='text-align: center'><a href='https://github.com/openpifpaf/openpifpaf' target='_blank'>Github Repo Openpifpaf</a> | <a href='https://github.com/peterbonnesoeur' target='_blank'>Github Repo peterbonnesoeur</a></p>"
# Prefer the richer article.html if it is bundled with the app. The original
# code read it unconditionally, which crashed at startup when the file was
# missing and made the fallback assignment above dead code.
try:
    with open("article.html", "r", encoding='utf-8') as f:
        article = f.read()
except FileNotFoundError:
    pass  # keep the inline fallback links defined above
# Demo gallery: sample images grouped by the model variant that suits them.
# Dict insertion order (Python 3.7+) keeps the original example ordering.
_EXAMPLES_BY_VERSION = {
    'whole-body': ['basketball.jpg', 'bill.png', 'billie.png'],
    'pose': ['meeting.jpeg', 'crowd.jpg'],
    'animal': ['dalmatian.jpg', 'tappo_loomo.jpg', 'cow.jpg'],
    'vehicles': ['india-vehicles.jpeg', 'russia-vehicles.jpg', 'paris-vehicles.jpg'],
}
examples = [
    [image, version]
    for version, images in _EXAMPLES_BY_VERSION.items()
    for image in images
]
# Wire up the demo: image + variant + overlay toggle in, rendered image out.
demo_inputs = [
    gr.Image(type="filepath", label="Input"),
    gr.Radio(['whole-body', 'pose', 'vehicles', 'animal'], value='whole-body', label='version'),
    gr.Checkbox(value=False, label="White overlay"),
]
demo_output = gr.Image(type="filepath", label="Output")

demo = gr.Interface(
    inference,
    demo_inputs,
    demo_output,
    title=title,
    description=description,
    article=article,
    examples=examples,
)
demo.launch()
|