Spaces:
Sleeping
Sleeping
Upload 9 files
Browse files- Docsbotport.html +44 -41
- ServChar.py +74 -0
- ServFire.py +124 -0
- ServG4F.py +106 -0
- clientCharacter.py +59 -0
- clientFireworks.py +98 -0
- clientG4F.py +88 -0
- comp.html +8 -6
- flowise.html +9 -7
Docsbotport.html
CHANGED
@@ -1,40 +1,41 @@
|
|
1 |
<!DOCTYPE html>
|
2 |
<html>
|
3 |
-
<head>
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
</
|
36 |
-
|
37 |
-
<
|
|
|
38 |
<div id="status">
|
39 |
<span>Module Running:</span>
|
40 |
<span class="led led-off" id="module-led"></span>
|
@@ -44,14 +45,16 @@
|
|
44 |
<br>
|
45 |
<div id="status-msg"></div>
|
46 |
</div>
|
47 |
-
<input type="text" id="port" placeholder="websocket port">
|
48 |
-
<
|
|
|
|
|
49 |
<input type="text" id="inputbox" placeholder="Type your message here...">
|
50 |
-
<
|
|
|
|
|
51 |
<div id="chatbox"></div>
|
52 |
-
<br><br>
|
53 |
-
<button id="clearbtn">New Chat (or Clear)</button>
|
54 |
-
<button id="testbtn">Test Server</button>
|
55 |
<p id="result"></p>
|
56 |
<script>
|
57 |
const mled = document.getElementById("module-led");
|
|
|
1 |
<!DOCTYPE html>
|
2 |
<html>
|
3 |
+
<head>
|
4 |
+
<title>HuggingFace Chat Interface</title>
|
5 |
+
<meta charset="UTF-8">
|
6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
7 |
+
<style>
|
8 |
+
#chatbox {
|
9 |
+
height: 500px;
|
10 |
+
width: 1080px;
|
11 |
+
border: 1px solid black;
|
12 |
+
overflow: auto;
|
13 |
+
padding: 10px;
|
14 |
+
background-color: white;
|
15 |
+
}
|
16 |
+
#inputbox {
|
17 |
+
height: 50px;
|
18 |
+
width: 1080px;
|
19 |
+
border: 1px solid black;
|
20 |
+
padding: 10px;
|
21 |
+
}
|
22 |
+
.led {
|
23 |
+
height: 10px;
|
24 |
+
width: 10px;
|
25 |
+
border-radius: 50%;
|
26 |
+
display: inline-block;
|
27 |
+
margin-right: 5px;
|
28 |
+
}
|
29 |
+
.led-on {
|
30 |
+
background-color: green;
|
31 |
+
}
|
32 |
+
.led-off {
|
33 |
+
background-color: red;
|
34 |
+
}
|
35 |
+
</style>
|
36 |
+
</head>
|
37 |
+
<body>
|
38 |
+
<h1>Chaindesk Chat Interface</h1>
|
39 |
<div id="status">
|
40 |
<span>Module Running:</span>
|
41 |
<span class="led led-off" id="module-led"></span>
|
|
|
45 |
<br>
|
46 |
<div id="status-msg"></div>
|
47 |
</div>
|
48 |
+
<input type="text" id="port" placeholder="websocket port">
|
49 |
+
<input type="text" id="flowise" placeholder="paste your agent id here">
|
50 |
+
<button id="connector">CONNECT TO SERVER</button>
|
51 |
+
<br><br>
|
52 |
<input type="text" id="inputbox" placeholder="Type your message here...">
|
53 |
+
<br><br>
|
54 |
+
<button id="sendbtn">Send</button><button id="clearbtn">New Chat (or Clear)</button><button id="testbtn">Test Server</button>
|
55 |
+
<br><br>
|
56 |
<div id="chatbox"></div>
|
57 |
+
<br><br>
|
|
|
|
|
58 |
<p id="result"></p>
|
59 |
<script>
|
60 |
const mled = document.getElementById("module-led");
|
ServChar.py
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
import websockets
import sqlite3
import datetime
import streamlit as st
from PyCharacterAI import Client


class WebSocketServer2:
    """WebSocket server that relays each client message to a Character.AI
    chat and sends the character's reply back.

    All traffic (client messages and character replies) is appended to the
    ``messages`` table of the local SQLite database ``chat-hub.db``.
    Credentials (``tokenChar``, ``character_ID``) are read from Streamlit
    session state, so this class is meant to run inside a Streamlit app.
    """

    def __init__(self, host, port):
        self.host = host      # interface to bind, e.g. "localhost"
        self.port = port      # TCP port for the websocket listener
        self.server = None    # set by start_server()

    @staticmethod
    def _log_message(sender, message):
        """Insert one row into chat-hub.db and always close the connection.

        The original code opened a new connection per message and never
        closed it, leaking a file handle for every message handled.
        """
        timestamp = datetime.datetime.now().isoformat()
        db = sqlite3.connect('chat-hub.db')
        try:
            db.execute(
                'INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                (sender, message, timestamp))
            db.commit()
        finally:
            db.close()

    async def handler(self, websocket):
        """Per-connection loop: greet the client, then forward every
        received message to the Character.AI chat and echo the reply.

        Raises whatever ``authenticate_with_token`` raises when the token
        in session state is empty/invalid (the defaults below only prevent
        an AttributeError on first access).
        """
        client = Client()
        if "tokenChar" not in st.session_state:
            st.session_state.tokenChar = ""
        if "character_ID" not in st.session_state:
            st.session_state.character_ID = ""

        await client.authenticate_with_token(st.session_state.tokenChar)
        char_id = st.session_state.character_ID
        chat = await client.create_or_continue_chat(char_id)
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        print('New connection')
        await websocket.send(instruction)
        while True:
            # Receive a message from the client and mirror it in the UI.
            message = await websocket.recv()
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            self._log_message('client', message)
            try:
                answer = await chat.send_message(message)
                response = f"{answer.src_character_name}: {answer.text}"
                print(response)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(response)
                self._log_message('server', response)
                await websocket.send(response)
            except Exception as e:
                # Best-effort: log and keep the connection loop alive,
                # matching the original behavior.
                print(f"Error: {e}")

    async def start_server(self):
        """Start listening; stores the server object for stop_server()."""
        self.server = await websockets.serve(
            self.handler,
            self.host,
            self.port
        )
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    def run_forever(self):
        """Blocking entry point: run the server on a dedicated event loop.

        ``asyncio.get_event_loop()`` is deprecated when no loop is running,
        so create and install a fresh loop explicitly.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.start_server())
        loop.run_forever()

    async def stop_server(self):
        """Close the listener and wait for it to shut down (no-op if never started)."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
|
ServFire.py
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
import websockets
import threading
import sqlite3
import datetime
import g4f
import streamlit as st
import fireworks.client


class WebSocketServer:
    """WebSocket server that answers client messages with the Fireworks
    ``llama-v2-7b-chat`` model.

    Chat history is persisted in the ``messages`` table of the local
    SQLite database ``chat-hub.db``; the last 10 rows are replayed as
    conversation context for every completion. The Fireworks API key is
    read from Streamlit session state (``api_key``).
    """

    def __init__(self, host, port):
        self.host = host      # interface to bind
        self.port = port      # TCP port for the websocket listener
        self.server = None    # set by start_server()

    @staticmethod
    def _log_message(sender, message):
        """Insert one row into chat-hub.db and always close the connection
        (the original leaked one connection per message)."""
        timestamp = datetime.datetime.now().isoformat()
        db = sqlite3.connect('chat-hub.db')
        try:
            db.execute(
                'INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                (sender, message, timestamp))
            db.commit()
        finally:
            db.close()

    async def chatCompletion(self, question):
        """Build a chat context from the last 10 stored messages and ask
        the Fireworks model for an answer.

        Returns the answer as ``str``, or a fixed error string on failure.
        """
        if "api_key" not in st.session_state:
            st.session_state.api_key = ""

        fireworks.client.api_key = st.session_state.api_key
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."

        try:
            # Fetch the 10 most recent messages, oldest first.
            db = sqlite3.connect('chat-hub.db')
            try:
                cursor = db.cursor()
                cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 10")
                messages = cursor.fetchall()
            finally:
                db.close()
            messages.reverse()

            # Split stored rows into user inputs vs. generated responses.
            # NOTE(review): assumes column 1 is `sender` and column 2 is
            # `message` in the messages table — matches the INSERT above.
            past_user_inputs = []
            generated_responses = []
            for message in messages:
                if message[1] == 'client':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            response = fireworks.client.ChatCompletion.create(
                model="accounts/fireworks/models/llama-v2-7b-chat",
                messages=[
                    {"role": "system", "content": system_instruction},
                    *[{"role": "user", "content": message} for message in past_user_inputs],
                    *[{"role": "assistant", "content": message} for message in generated_responses],
                    {"role": "user", "content": question}
                ],
                stream=False,
                n=1,
                max_tokens=2500,
                temperature=0.5,
                top_p=0.7,
            )

            answer = response.choices[0].message.content
            print(answer)
            return str(answer)

        except Exception as error:
            print("Error while fetching or processing the response:", error)
            return "Error: Unable to generate a response."

    async def handler(self, websocket):
        """Per-connection loop: greet the client, then answer every
        received message via chatCompletion()."""
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        print('New connection')
        await websocket.send(instruction)
        while True:
            message = await websocket.recv()
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            self._log_message('client', message)
            try:
                response = await self.chatCompletion(message)
                serverResponse = f"server: {response}"
                print(serverResponse)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(serverResponse)
                self._log_message('server', serverResponse)
                await websocket.send(serverResponse)

            except websockets.exceptions.ConnectionClosedError as e:
                print(f"Connection closed: {e}")

            except Exception as e:
                print(f"Error: {e}")

    async def start_server(self):
        """Start listening; stores the server object for stop_server()."""
        self.server = await websockets.serve(
            self.handler,
            self.host,
            self.port
        )
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    def run_forever(self):
        """Blocking entry point on a dedicated, explicitly created loop
        (asyncio.get_event_loop() is deprecated outside a running loop)."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.start_server())
        loop.run_forever()

    async def stop_server(self):
        """Close the listener and wait for shutdown (no-op if never started)."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
|
ServG4F.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
import websockets
import threading
import sqlite3
import datetime
import g4f
import streamlit as st


class WebSocketServer1:
    """WebSocket server that answers client messages with g4f (GPT-4 via
    the Bing provider).

    Chat history is persisted in the ``messages`` table of the local
    SQLite database ``chat-hub.db``; the last 30 rows are replayed as
    conversation context for every completion.
    """

    def __init__(self, host, port):
        self.host = host      # interface to bind
        self.port = port      # TCP port for the websocket listener
        self.server = None    # set by start_server()

    @staticmethod
    def _log_message(sender, message):
        """Insert one row into chat-hub.db and always close the connection
        (the original leaked one connection per message)."""
        timestamp = datetime.datetime.now().isoformat()
        db = sqlite3.connect('chat-hub.db')
        try:
            db.execute(
                'INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                (sender, message, timestamp))
            db.commit()
        finally:
            db.close()

    async def askQuestion(self, question):
        """Build a chat context from the last 30 stored messages and ask
        the g4f GPT-4 model.

        Returns the model response, or ``None`` if an exception was caught
        (the original swallowed errors the same way).
        """
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
        try:
            # Fetch the 30 most recent messages, oldest first.
            db = sqlite3.connect('chat-hub.db')
            try:
                cursor = db.cursor()
                cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 30")
                messages = cursor.fetchall()
            finally:
                db.close()
            messages.reverse()

            past_user_inputs = []
            generated_responses = []
            for message in messages:
                if message[1] == 'client':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            response = await g4f.ChatCompletion.create_async(
                model=g4f.models.gpt_4,
                provider=g4f.Provider.Bing,
                messages=[
                    {"role": "system", "content": system_instruction},
                    *[{"role": "user", "content": message} for message in past_user_inputs],
                    *[{"role": "assistant", "content": message} for message in generated_responses],
                    {"role": "user", "content": question}
                ])

            print(response)
            return response

        except Exception as e:
            print(e)

    async def handler(self, websocket):
        """Per-connection loop: greet the client, then answer every
        received message via askQuestion()."""
        instruction = "Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT - a project of hierarchical cooperative multi-agent framework. Keep in mind that you are speaking with another chatbot. Please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic. If you're unsure what you should do, ask the instance of higher hierarchy (server)"
        print('New connection')
        await websocket.send(instruction)
        while True:
            message = await websocket.recv()
            print(f"Server received: {message}")
            input_Msg = st.chat_message("assistant")
            input_Msg.markdown(message)
            self._log_message('client', message)
            try:
                response = await self.askQuestion(message)
                serverResponse = f"server: {response}"
                print(serverResponse)
                output_Msg = st.chat_message("ai")
                output_Msg.markdown(serverResponse)
                self._log_message('server', serverResponse)
                await websocket.send(serverResponse)

            except websockets.exceptions.ConnectionClosedError as e:
                print(f"Connection closed: {e}")

            except Exception as e:
                print(f"Error: {e}")

    async def start_server(self):
        """Start listening; stores the server object for stop_server()."""
        self.server = await websockets.serve(
            self.handler,
            self.host,
            self.port
        )
        print(f"WebSocket server started at ws://{self.host}:{self.port}")

    def run_forever(self):
        """Blocking entry point on a dedicated, explicitly created loop
        (asyncio.get_event_loop() is deprecated outside a running loop)."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.start_server())
        loop.run_forever()

    async def stop_server(self):
        """Close the listener and wait for shutdown (no-op if never started)."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            print("WebSocket server stopped.")
|
clientCharacter.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
import websockets
import threading
import sqlite3
import streamlit as st
from PyCharacterAI import Client


class WebSocketClient2:
    """Websocket client that answers server messages through a
    Character.AI chat.

    Credentials (``tokenChar``, ``character_ID``) are read from Streamlit
    session state; this class is meant to run inside a Streamlit app.
    """

    def __init__(self, uri):
        # Websocket endpoint to connect to, e.g. "ws://localhost:1234"
        self.uri = uri

    def run(self):
        """Run the client loop on a background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Thread target: create a private event loop and run the client.

        BUG FIX: the original called ``self.client()``, a method that does
        not exist on this class (AttributeError at runtime); the coroutine
        is named ``startClient``.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.startClient())

    async def startClient(self):
        """Connect to the server and answer every message it sends via the
        Character.AI chat, forever."""
        client = Client()
        await client.authenticate_with_token(st.session_state.tokenChar)
        chat = await client.create_or_continue_chat(st.session_state.character_ID)
        async with websockets.connect(self.uri) as websocket:
            while True:
                # Wait for the next server message and mirror it in the UI.
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    answer = await chat.send_message(input_message)
                    response = f"{answer.src_character_name}: {answer.text}"
                    print(response)
                    outputMsg1 = st.chat_message("ai")
                    outputMsg1.markdown(response)
                    await websocket.send(response)

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    # Best-effort: keep listening after any failure,
                    # matching the original behavior.
                    print(f"Error: {e}")
                    continue
|
clientFireworks.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
import websockets
import threading
import sqlite3
import fireworks.client
import streamlit as st


class WebSocketClient:
    """Websocket client that answers server messages with the Fireworks
    ``llama-v2-7b-chat`` model.

    The last 10 rows of the local ``chat-hub.db`` are replayed as
    conversation context for every completion.
    NOTE(review): unlike the server counterpart, this class never sets
    ``fireworks.client.api_key`` — it relies on the key being configured
    elsewhere in the process. Confirm against the caller.
    """

    def __init__(self, uri):
        # Websocket endpoint to connect to, e.g. "ws://localhost:1234"
        self.uri = uri

    async def chatCompletion(self, question):
        """Build a chat context from the last 10 stored messages and ask
        the Fireworks model.

        Returns the answer as ``str``, or a fixed error string on failure.
        """
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
        try:
            # Fetch the 10 most recent messages, oldest first; always
            # close the connection (the original leaked it).
            db = sqlite3.connect('chat-hub.db')
            try:
                cursor = db.cursor()
                cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 10")
                messages = cursor.fetchall()
            finally:
                db.close()
            messages.reverse()

            # From the client's perspective the server's lines are the
            # "user" side of the conversation.
            past_user_inputs = []
            generated_responses = []
            for message in messages:
                if message[1] == 'server':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            response = fireworks.client.ChatCompletion.create(
                model="accounts/fireworks/models/llama-v2-7b-chat",
                messages=[
                    {"role": "system", "content": system_instruction},
                    *[{"role": "user", "content": message} for message in past_user_inputs],
                    *[{"role": "assistant", "content": message} for message in generated_responses],
                    {"role": "user", "content": question}
                ],
                stream=False,
                n=1,
                max_tokens=2500,
                temperature=0.5,
                top_p=0.7,
            )

            answer = response.choices[0].message.content
            print(answer)
            return str(answer)

        except Exception as error:
            print("Error while fetching or processing the response:", error)
            return "Error: Unable to generate a response."

    def run(self):
        """Run the client loop on a background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Thread target: create a private event loop and run the client.

        BUG FIX: the original called ``self.client()``, a method that does
        not exist on this class (AttributeError at runtime); the coroutine
        is named ``startClient``.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.startClient())

    async def startClient(self):
        """Connect to the server and answer every message it sends, forever."""
        async with websockets.connect(self.uri) as websocket:
            while True:
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    response = await self.chatCompletion(input_message)
                    res1 = f"Client: {response}"
                    output_Msg = st.chat_message("ai")
                    output_Msg.markdown(res1)
                    await websocket.send(res1)

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|
clientG4F.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
import websockets
import threading
import sqlite3
import g4f
import streamlit as st


class WebSocketClient1:
    """Websocket client that answers server messages with g4f (GPT-4 via
    the Bing provider).

    The last 30 rows of the local ``chat-hub.db`` are replayed as
    conversation context for every completion.
    """

    def __init__(self, uri):
        # Websocket endpoint to connect to, e.g. "ws://localhost:1234"
        self.uri = uri

    async def askQuestion(self, question):
        """Build a chat context from the last 30 stored messages and ask
        the g4f GPT-4 model.

        Returns the model response, or ``None`` if an exception was caught
        (the original swallowed errors the same way).
        """
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your main job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (template: <NAME>-agent and/or <NAME>-client). Your chat memory module is integrated with a local SQL database with chat history. Your primary objective is to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of the question->answer logic. However, please note that you may choose to ignore or not respond to repeating inputs from specific clients as needed to prevent unnecessary traffic."
        try:
            # Fetch the 30 most recent messages, oldest first; always
            # close the connection (the original leaked it).
            db = sqlite3.connect('chat-hub.db')
            try:
                cursor = db.cursor()
                cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 30")
                messages = cursor.fetchall()
            finally:
                db.close()
            messages.reverse()

            # From the client's perspective the server's lines are the
            # "user" side of the conversation.
            past_user_inputs = []
            generated_responses = []
            for message in messages:
                if message[1] == 'server':
                    past_user_inputs.append(message[2])
                else:
                    generated_responses.append(message[2])

            response = await g4f.ChatCompletion.create_async(
                model=g4f.models.gpt_4,
                provider=g4f.Provider.Bing,
                messages=[
                    {"role": "system", "content": system_instruction},
                    *[{"role": "user", "content": message} for message in past_user_inputs],
                    *[{"role": "assistant", "content": message} for message in generated_responses],
                    {"role": "user", "content": question}
                ])

            print(response)
            return response

        except Exception as e:
            print(e)

    def run(self):
        """Run the client loop on a background thread."""
        self.thread = threading.Thread(target=self.run_client)
        self.thread.start()

    def run_client(self):
        """Thread target: create a private event loop and run the client.

        BUG FIX: the original called ``self.client()``, a method that does
        not exist on this class (AttributeError at runtime); the coroutine
        is named ``startClient``.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.startClient())

    async def startClient(self):
        """Connect to the server and answer every message it sends, forever."""
        async with websockets.connect(self.uri) as websocket:
            while True:
                input_message = await websocket.recv()
                print(f"Server: {input_message}")
                input_Msg = st.chat_message("assistant")
                input_Msg.markdown(input_message)
                try:
                    response = await self.askQuestion(input_message)
                    res1 = f"Client: {response}"
                    output_Msg = st.chat_message("ai")
                    output_Msg.markdown(res1)
                    await websocket.send(res1)

                except websockets.ConnectionClosed:
                    print("client disconnected")
                    continue

                except Exception as e:
                    print(f"Error: {e}")
                    continue
|
comp.html
CHANGED
@@ -6,11 +6,12 @@
|
|
6 |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
7 |
<style>
|
8 |
#chatbox {
|
9 |
-
height:
|
10 |
width: 1080px;
|
11 |
border: 1px solid black;
|
12 |
overflow: auto;
|
13 |
padding: 10px;
|
|
|
14 |
}
|
15 |
#inputbox {
|
16 |
height: 20px;
|
@@ -46,13 +47,14 @@
|
|
46 |
</div>
|
47 |
<input type="text" id="port" placeholder="websocket port">
|
48 |
<input type="text" id="flowise" placeholder="paste your agent id here">
|
49 |
-
<button id="connector">CONNECT TO SERVER</button>
|
|
|
50 |
<input type="text" id="inputbox" placeholder="Type your message here...">
|
51 |
-
<
|
|
|
|
|
52 |
<div id="chatbox"></div>
|
53 |
-
<br><br>
|
54 |
-
<button id="clearbtn">New Chat (or Clear)</button>
|
55 |
-
<button id="testbtn">Test Server</button>
|
56 |
<p id="result"></p>
|
57 |
<script>
|
58 |
const mled = document.getElementById("module-led");
|
|
|
6 |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
7 |
<style>
|
8 |
#chatbox {
|
9 |
+
height: 500px;
|
10 |
width: 1080px;
|
11 |
border: 1px solid black;
|
12 |
overflow: auto;
|
13 |
padding: 10px;
|
14 |
+
background-color: white;
|
15 |
}
|
16 |
#inputbox {
|
17 |
height: 20px;
|
|
|
47 |
</div>
|
48 |
<input type="text" id="port" placeholder="websocket port">
|
49 |
<input type="text" id="flowise" placeholder="paste your agent id here">
|
50 |
+
<button id="connector">CONNECT TO SERVER</button>
|
51 |
+
<br><br>
|
52 |
<input type="text" id="inputbox" placeholder="Type your message here...">
|
53 |
+
<br><br>
|
54 |
+
<button id="sendbtn">Send</button><button id="clearbtn">New Chat (or Clear)</button><button id="testbtn">Test Server</button>
|
55 |
+
<br><br>
|
56 |
<div id="chatbox"></div>
|
57 |
+
<br><br>
|
|
|
|
|
58 |
<p id="result"></p>
|
59 |
<script>
|
60 |
const mled = document.getElementById("module-led");
|
flowise.html
CHANGED
@@ -6,11 +6,12 @@
|
|
6 |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
7 |
<style>
|
8 |
#chatbox {
|
9 |
-
height:
|
10 |
width: 1080px;
|
11 |
border: 1px solid black;
|
12 |
overflow: auto;
|
13 |
padding: 10px;
|
|
|
14 |
}
|
15 |
#inputbox {
|
16 |
height: 20px;
|
@@ -46,13 +47,14 @@
|
|
46 |
</div>
|
47 |
<input type="text" id="port" placeholder="websocket port">
|
48 |
<input type="text" id="flowise" placeholder="paste your agent id here">
|
49 |
-
<button id="connector">CONNECT TO SERVER</button>
|
|
|
50 |
<input type="text" id="inputbox" placeholder="Type your message here...">
|
51 |
-
<
|
|
|
|
|
52 |
<div id="chatbox"></div>
|
53 |
-
<br><br>
|
54 |
-
<button id="clearbtn">New Chat (or Clear)</button>
|
55 |
-
<button id="testbtn">Test Server</button>
|
56 |
<p id="result"></p>
|
57 |
<script>
|
58 |
const mled = document.getElementById("module-led");
|
@@ -87,7 +89,7 @@
|
|
87 |
async function askQuestion(question) {
|
88 |
try {
|
89 |
const flow = flowise.value
|
90 |
-
const url = `https://
|
91 |
const response = await fetch(url, {
|
92 |
method: 'POST',
|
93 |
headers: {'Content-Type': 'application/json',},
|
|
|
6 |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
7 |
<style>
|
8 |
#chatbox {
|
9 |
+
height: 500px;
|
10 |
width: 1080px;
|
11 |
border: 1px solid black;
|
12 |
overflow: auto;
|
13 |
padding: 10px;
|
14 |
+
background-color: white;
|
15 |
}
|
16 |
#inputbox {
|
17 |
height: 20px;
|
|
|
47 |
</div>
|
48 |
<input type="text" id="port" placeholder="websocket port">
|
49 |
<input type="text" id="flowise" placeholder="paste your agent id here">
|
50 |
+
<button id="connector">CONNECT TO SERVER</button>
|
51 |
+
<br><br>
|
52 |
<input type="text" id="inputbox" placeholder="Type your message here...">
|
53 |
+
<br><br>
|
54 |
+
<button id="sendbtn">Send</button><button id="clearbtn">New Chat (or Clear)</button><button id="testbtn">Test Server</button>
|
55 |
+
<br><br>
|
56 |
<div id="chatbox"></div>
|
57 |
+
<br><br>
|
|
|
|
|
58 |
<p id="result"></p>
|
59 |
<script>
|
60 |
const mled = document.getElementById("module-led");
|
|
|
89 |
async function askQuestion(question) {
|
90 |
try {
|
91 |
const flow = flowise.value
|
92 |
+
const url = `https://raufxd-flowise.hf.space/api/v1/prediction/${flow}`;
|
93 |
const response = await fetch(url, {
|
94 |
method: 'POST',
|
95 |
headers: {'Content-Type': 'application/json',},
|