Spaces:
Running
Running
seawolf2357
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -2,11 +2,9 @@ import gradio as gr
|
|
2 |
import requests
|
3 |
import os
|
4 |
|
5 |
-
# 환경 변수에서 Hugging Face API 토큰을 가져옵니다.
|
6 |
ACCESS_TOKEN = os.getenv("HF_TOKEN")
|
7 |
|
8 |
def respond(message, max_tokens=512, temperature=0.7, top_p=0.95):
|
9 |
-
# 메시지를 구성
|
10 |
data = {
|
11 |
"model": "command-r-plus:104b-fp16",
|
12 |
"prompt": message,
|
@@ -14,14 +12,17 @@ def respond(message, max_tokens=512, temperature=0.7, top_p=0.95):
|
|
14 |
"temperature": temperature,
|
15 |
"top_p": top_p
|
16 |
}
|
17 |
-
|
18 |
-
# 외부 API로 요청을 보냄
|
19 |
response = requests.post("http://hugpu.ai:7877/api/generate", json=data)
|
20 |
-
|
21 |
-
|
22 |
-
|
|
|
|
|
|
|
|
|
|
|
23 |
|
24 |
-
# Gradio 인터페이스 설정
|
25 |
demo = gr.Interface(
|
26 |
fn=respond,
|
27 |
inputs=[
|
|
|
2 |
import requests
|
3 |
import os
|
4 |
|
|
|
5 |
ACCESS_TOKEN = os.getenv("HF_TOKEN")
|
6 |
|
7 |
def respond(message, max_tokens=512, temperature=0.7, top_p=0.95):
|
|
|
8 |
data = {
|
9 |
"model": "command-r-plus:104b-fp16",
|
10 |
"prompt": message,
|
|
|
12 |
"temperature": temperature,
|
13 |
"top_p": top_p
|
14 |
}
|
15 |
+
|
|
|
16 |
response = requests.post("http://hugpu.ai:7877/api/generate", json=data)
|
17 |
+
|
18 |
+
try:
|
19 |
+
generated_text = response.json().get('generated_text', '')
|
20 |
+
except json.JSONDecodeError as e:
|
21 |
+
print("Failed to decode JSON from response:", response.text)
|
22 |
+
generated_text = "An error occurred while processing your request."
|
23 |
+
|
24 |
+
return generated_text
|
25 |
|
|
|
26 |
demo = gr.Interface(
|
27 |
fn=respond,
|
28 |
inputs=[
|