ehristoforu
committed on
Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import os
 import spaces
-from transformers import
+from transformers import AutoTokenizer, AutoModelForCausalLM
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from threading import Thread
 
@@ -77,7 +77,7 @@ def chat_openchat_36(message: str,
     conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
     conversation.append({"role": "user", "content": message})
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
+    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
 
     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
 
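The second hunk adds add_generation_prompt=True to tokenizer.apply_chat_template, which makes the chat template append the tokens that open an assistant turn, so generation starts a reply instead of continuing the user's last message. Below is a minimal, self-contained sketch of the threaded streaming pattern this function follows; the checkpoint name is a placeholder assumption, since the diff does not show which model the Space actually loads.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Placeholder checkpoint for illustration only; the real model is not shown in this diff.
model_id = "openchat/openchat-3.6-8b-20240522"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

conversation = [{"role": "user", "content": "Hello!"}]

# add_generation_prompt=True appends the template tokens that open an assistant
# turn, so the model produces a response rather than extending the user message.
input_ids = tokenizer.apply_chat_template(
    conversation, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# skip_prompt=True keeps the echoed prompt out of the streamed output.
streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs in a background thread while the main thread
# consumes decoded text chunks from the streamer as they arrive.
thread = Thread(
    target=model.generate,
    kwargs=dict(input_ids=input_ids, streamer=streamer, max_new_tokens=256),
)
thread.start()

for chunk in streamer:
    print(chunk, end="", flush=True)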