Spaces:
Running
Running
lukestanley
committed on
Commit
·
abbebf8
1
Parent(s):
293af3a
Add JSONL disk file logging functionality to app.py and utils.py
Browse files
app.py
CHANGED
@@ -50,12 +50,25 @@ Help make the internet a kinder place, one comment at a time. Your contribution
|
|
50 |
"""
|
51 |
|
52 |
from chill import improvement_loop
|
|
|
|
|
53 |
|
|
|
|
|
|
|
|
|
54 |
|
55 |
def chill_out(text):
|
|
|
|
|
|
|
|
|
|
|
56 |
print("Got this input:", text)
|
57 |
result: dict = improvement_loop(text)
|
58 |
print("Got this result:", result)
|
|
|
|
|
59 |
|
60 |
formatted_output = f"""
|
61 |
<div>
|
|
|
50 |
"""
|
51 |
|
52 |
from chill import improvement_loop
|
53 |
+
import uuid
|
54 |
+
from datetime import datetime
|
55 |
|
56 |
+
def log_to_jsonl(file_path, data):
    """Append *data* as a single JSON record to *file_path*.

    The file is opened in append mode, so repeated calls build a
    JSON Lines (JSONL) log with one serialized object per line.
    """
    with open(file_path, 'a') as out:
        out.write(json.dumps(data) + "\n")
|
60 |
|
61 |
def chill_out(text):
|
62 |
+
log_entry = {
|
63 |
+
"uuid": str(uuid.uuid4()),
|
64 |
+
"timestamp": datetime.utcnow().isoformat(),
|
65 |
+
"input": text
|
66 |
+
}
|
67 |
print("Got this input:", text)
|
68 |
result: dict = improvement_loop(text)
|
69 |
print("Got this result:", result)
|
70 |
+
log_entry["output"] = result
|
71 |
+
log_to_jsonl('inputs_and_outputs.jsonl', log_entry)
|
72 |
|
73 |
formatted_output = f"""
|
74 |
<div>
|
utils.py
CHANGED
@@ -1,7 +1,9 @@
|
|
|
|
1 |
import json
|
2 |
from time import time, sleep
|
3 |
from os import environ as env
|
4 |
from typing import Any, Dict, Union
|
|
|
5 |
|
6 |
import requests
|
7 |
from huggingface_hub import hf_hub_download
|
@@ -233,17 +235,33 @@ def llm_stream_mistral_api(prompt: str, pydantic_model_class) -> Union[str, Dict
|
|
233 |
print("No pydantic model class provided, returning without class validation")
|
234 |
return json.loads(output)
|
235 |
|
|
|
|
|
|
|
|
|
|
|
|
|
236 |
def query_ai_prompt(prompt, replacements, model_class):
|
237 |
prompt = replace_text(prompt, replacements)
|
238 |
if LLM_WORKER == "mistral":
|
239 |
-
|
240 |
if LLM_WORKER == "mistral":
|
241 |
-
|
242 |
if LLM_WORKER == "runpod":
|
243 |
-
|
244 |
if LLM_WORKER == "http":
|
245 |
-
|
246 |
if LLM_WORKER == "in_memory":
|
247 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
248 |
|
|
|
249 |
|
|
|
1 |
+
import datetime
|
2 |
import json
|
3 |
from time import time, sleep
|
4 |
from os import environ as env
|
5 |
from typing import Any, Dict, Union
|
6 |
+
import uuid
|
7 |
|
8 |
import requests
|
9 |
from huggingface_hub import hf_hub_download
|
|
|
235 |
print("No pydantic model class provided, returning without class validation")
|
236 |
return json.loads(output)
|
237 |
|
238 |
+
def log_to_jsonl(file_path, data):
    """Serialize *data* to JSON and append it, newline-terminated,
    to the JSONL file at *file_path* (the file is created if absent).
    """
    with open(file_path, 'a') as log_file:
        record = json.dumps(data) + "\n"
        log_file.write(record)
|
242 |
+
|
243 |
+
|
244 |
def query_ai_prompt(prompt, replacements, model_class):
    """Fill the prompt template, dispatch it to the configured LLM worker,
    log the exchange to a JSONL file, and return the worker's result.

    Parameters:
        prompt: prompt template text; placeholders are substituted via
            ``replace_text`` with *replacements*.
        replacements: mapping of placeholder -> replacement text.
        model_class: pydantic model class forwarded to the worker for
            output validation.

    Returns:
        The worker's result (a dict or string, per the worker helpers).

    Raises:
        ValueError: if ``LLM_WORKER`` names no known worker. (Previously
        an unknown worker fell through and crashed later with a
        ``NameError`` on ``result``.)
    """
    prompt = replace_text(prompt, replacements)
    # Exactly one branch runs. The original used independent `if`
    # statements with the "mistral" branch duplicated verbatim; an
    # elif chain removes the duplicate and the redundant comparisons.
    if LLM_WORKER == "mistral":
        result = llm_stream_mistral_api(prompt, model_class)
    elif LLM_WORKER == "runpod":
        result = llm_stream_serverless(prompt, model_class)
    elif LLM_WORKER == "http":
        result = llm_streaming(prompt, model_class)
    elif LLM_WORKER == "in_memory":
        result = llm_stream_sans_network(prompt, model_class)
    else:
        raise ValueError(f"Unknown LLM_WORKER: {LLM_WORKER!r}")

    # Record the full exchange for later inspection / dataset building.
    log_entry = {
        "uuid": str(uuid.uuid4()),
        # NOTE(review): utcnow() is naive (no tzinfo) and deprecated in
        # 3.12+; kept for byte-compatible timestamps with prior logs.
        "timestamp": datetime.datetime.utcnow().isoformat(),
        "worker": LLM_WORKER,
        "prompt_input": prompt,
        "prompt_output": result,
    }
    log_to_jsonl('prompt_inputs_and_outputs.jsonl', log_entry)

    return result
|
267 |
|