Ishaan Gupta
committed on
Commit
·
0361a01
1
Parent(s):
6b7380d
custom handler
Browse files- handler.py +53 -0
- requirements.txt +2 -0
- test_handler.py +40 -0
handler.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
from io import BytesIO
from typing import Any, Dict, List

import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq
6 |
+
|
7 |
+
|
8 |
+
class EndpointHandler():
    """Custom inference-endpoint handler for microsoft/kosmos-2-patch14-224.

    Accepts a payload containing a grounding prompt and a base64-encoded
    image, runs Kosmos-2 generation, and returns the cleaned caption plus
    the grounded entities extracted by the processor.
    """

    def __init__(self, path=""):
        # Preload model and processor once at startup so each request only
        # pays for inference, not loading.
        # Fall back to CPU when no GPU is present: the original hard-coded
        # .to("cuda") crashes on CPU-only machines.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = AutoModelForVision2Seq.from_pretrained(
            "microsoft/kosmos-2-patch14-224"
        ).to(self.device)
        self.processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Run one inference request.

        Args:
            data: payload dict with keys:
                - "prompt": grounding prompt, e.g. "<grounding>An image of"
                - "image_base64": base64-encoded image bytes

        Returns:
            A one-element list holding a dict with the cleaned caption under
            "processed_text" and the extracted grounded entities under
            "entities".

        Raises:
            ValueError: if a required payload key is missing.
        """
        # Validate the payload explicitly instead of letting dict.pop raise
        # an opaque KeyError at the endpoint boundary.
        prompt = data.pop("prompt", None)
        image_base64 = data.pop("image_base64", None)
        if prompt is None or image_base64 is None:
            raise ValueError("payload must contain 'prompt' and 'image_base64'")

        image_data = base64.b64decode(image_base64)
        # Normalize to RGB so palette / RGBA / grayscale uploads don't trip
        # up the image processor.
        image = Image.open(BytesIO(image_data)).convert("RGB")

        inputs = self.processor(text=prompt, images=image, return_tensors="pt").to(self.device)

        generated_ids = self.model.generate(
            pixel_values=inputs["pixel_values"],
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            image_embeds=None,
            image_embeds_position_mask=inputs["image_embeds_position_mask"],
            use_cache=True,
            max_new_tokens=128,
        )
        generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

        # post_process_generation (default cleanup_and_extract=True) strips
        # the grounding markup and extracts the entities, e.g.
        #   text:     "An image of a snowman warming himself by a fire."
        #   entities: [('a snowman', (12, 21), [(0.39, 0.05, 0.98, 0.83)]), ...]
        # (Pass cleanup_and_extract=False to see the raw model generation.)
        processed_text, entities = self.processor.post_process_generation(generated_text)

        # The original computed `entities` and then discarded them; return
        # them alongside the caption (backward-compatible: old key is kept).
        return [{"processed_text": processed_text, "entities": entities}]
|
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
transformers
|
2 |
+
pillow
|
test_handler.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from handler import EndpointHandler
from PIL import Image
import requests
import base64
from io import BytesIO
import time

# Init the handler once (this loads the model, so it is the slow part).
my_handler = EndpointHandler(path=".")

# prompt = "<grounding>An image of"
prompt = "<grounding>Describe this image in detail"

url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png"
image = Image.open(requests.get(url, stream=True).raw)

# The original Kosmos-2 demo saves the image first and then reloads it; for
# some images this changes the pixel input slightly and with it the output.
# Convert to RGB first: the source PNG may carry an alpha channel, which the
# JPEG format cannot store and would make .save(..., 'JPEG') fail.
image = image.convert("RGB")
image.save("new_image.jpg")
# Fix: reload the file we just saved, not an unrelated "img1.jpg".
image = Image.open("new_image.jpg")

# Base64-encode the JPEG bytes the same way a real client would.
buffered = BytesIO()
image.save(buffered, format='JPEG')
image_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')

# Prepare a sample payload matching the handler's expected keys.
payload = {"prompt": prompt, "image_base64": image_base64}

init_t = time.time()
# Exercise the handler end to end.
pred = my_handler(payload)

# Show the result and the wall-clock inference latency.
print("image_description", pred)
print(time.time() - init_t)
|