Abhaykoul commited on
Commit
4f61861
·
verified ·
1 Parent(s): 4b51872

Create chatv1.py

Browse files
Files changed (1) hide show
  1. chatv1.py +109 -0
chatv1.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import json
3
+ from typing import Generator, Optional
4
+ import os
5
+ from dotenv import load_dotenv
6
+ import re
7
+ from fastapi import FastAPI, HTTPException, Query
8
+ from fastapi.responses import StreamingResponse
9
+ from pydantic import BaseModel
10
+
11
# Load environment variables from .env so os.getenv("CHATv1") can resolve the API endpoint.
load_dotenv()

# FastAPI application instance.
# NOTE(review): no route handlers are defined in this file — confirm routes are
# registered elsewhere (or that this app object is intentionally bare).
app = FastAPI()
15
+
16
class ChatRequest(BaseModel):
    """Request body schema for a chat call: the user's message plus an
    optional system prompt controlling the assistant's behavior."""

    # The user's message to send to the model (required).
    user_prompt: str
    # System instruction; defaults to a generic helpful-assistant persona.
    system_prompt: Optional[str] = "You are a helpful AI assistant."
19
+
20
class CHATv1:
    """
    Client for the CHATv1 streaming chat API.

    The endpoint URL is read from the ``CHATv1`` environment variable
    (loaded from ``.env`` at module import time).
    """

    # Compiled once at class creation: converts *text* emphasis to <i>text</i>.
    _ITALIC_RE = re.compile(r'\*(.*?)\*')

    def __init__(
        self,
        timeout: int = 300,
        proxies: Optional[dict] = None,
    ):
        """
        Initialize the CHATv1 API client.

        Args:
            timeout (int, optional): HTTP request timeout in seconds. Defaults to 300.
            proxies (dict, optional): HTTP request proxies. Defaults to None (no proxies).
                (A None default replaces the original mutable-default ``{}`` pitfall.)

        Raises:
            ValueError: If the ``CHATv1`` environment variable is not set.
        """
        self.session = requests.Session()
        self.api_endpoint = os.getenv("CHATv1")
        # Fail fast on misconfiguration instead of posting to ``None`` later.
        if not self.api_endpoint:
            raise ValueError("CHATv1 environment variable is not set")
        self.timeout = timeout
        self.headers = {
            "content-type": "application/json",
        }
        self.session.headers.update(self.headers)
        self.session.proxies = proxies if proxies is not None else {}

    def ask(self, user_prompt: str, system_prompt: str) -> Generator[str, None, None]:
        """
        Chat with AI, streaming the reply.

        Args:
            user_prompt (str): User's prompt to be sent.
            system_prompt (str): System prompt to set the AI's behavior.

        Yields:
            str: Incremental text responses (complete formatted lines), then a
            final ``"[DONE]"`` sentinel.

        Raises:
            HTTPException: If the API responds with a non-2xx status code.
        """
        payload = {
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ]
        }

        # ``with`` guarantees the streaming connection is released even if the
        # consumer abandons the generator mid-stream (the original leaked it).
        with self.session.post(
            self.api_endpoint, json=payload, stream=True, timeout=self.timeout
        ) as response:
            if not response.ok:
                raise HTTPException(
                    status_code=response.status_code,
                    detail=f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            buffer = ""
            for line in response.iter_lines(decode_unicode=True):
                # SSE frames of interest start with "data: "; skip everything else.
                if not line or not line.startswith("data: "):
                    continue
                data_str = line[6:]
                try:
                    data_json = json.loads(data_str)
                except json.JSONDecodeError:
                    # Deliberate best-effort: ignore keep-alive / non-JSON frames.
                    continue
                content = data_json.get("data", "")
                if not content:
                    continue
                buffer += content
                # Emit only complete lines; hold the trailing partial line back
                # so formatting never splits a line across chunks.
                lines = buffer.split('\n')
                if len(lines) > 1:
                    for complete_line in lines[:-1]:
                        yield self.format_text(complete_line) + '\n'
                    buffer = lines[-1]

            # Flush whatever partial line remains once the stream closes.
            if buffer:
                yield self.format_text(buffer)

        yield "[DONE]"

    def format_text(self, text: str) -> str:
        """Convert ``*emphasis*`` markdown spans in *text* to ``<i>...</i>`` HTML."""
        return self._ITALIC_RE.sub(r'<i>\1</i>', text)

    def chat(self, user_prompt: str, system_prompt: str) -> Generator[str, None, None]:
        """Stream responses as string chunks (thin alias for :meth:`ask`)."""
        return self.ask(user_prompt, system_prompt)
108
+
109
+