sohoso committed
Commit 71d15da · verified · 1 Parent(s): 596b3cf

Update app.py

Files changed (1)
  1. app.py +40 -56
app.py CHANGED
@@ -1,12 +1,4 @@
- import time
- import os
- import multiprocessing
- import torch
- import requests
- import asyncio
- import json
- import aiohttp
- import logging
+ import time, os, multiprocessing, torch, requests, asyncio, json, aiohttp
  from minivectordb.embedding_model import EmbeddingModel
  from minivectordb.vector_database import VectorDatabase
  from text_util_en_pt.cleaner import structurize_text, detect_language, Language
@@ -18,11 +10,6 @@ torch.set_num_threads(2)
  openrouter_key = os.environ.get("OPENROUTER_KEY")
  model = EmbeddingModel(use_quantized_onnx_model=True)

-
-
- # Configure logging
- logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s - %(message)s')
-
  def fetch_links(query, max_results=10):
      return list(search(query, num_results=max_results))

@@ -37,7 +24,7 @@ def index_and_search(query, text):

      # Indexing
      vector_db = VectorDatabase()
-     sentences = [s['sentence'] for s in structurize_text(text)]
+     sentences = [ s['sentence'] for s in structurize_text(text)]

      for idx, sentence in enumerate(sentences):
          sentence_embedding = model.extract_embeddings(sentence)
@@ -58,27 +45,25 @@ def generate_search_terms(message, lang):
      prompt = f"From the following text, generate some search terms: \"{message}\"\nYour answer should be just the most appropriate search term, and nothing else."

      url = "https://openrouter.ai/api/v1/chat/completions"
-     headers = {"Content-Type": "application/json",
-                "Authorization": f"Bearer {openrouter_key}"}
-     body = {"stream": False,
-             "models": [
-                 "mistralai/mistral-7b-instruct:free",
-                 "openchat/openchat-7b:free"
-             ],
-             "route": "fallback",
-             "max_tokens": 1024,
-             "messages": [
-                 {"role": "user", "content": prompt}
-             ]}
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {openrouter_key}"
+     }
+     body = {
+         "stream": False,
+         "models": [
+             "mistralai/mistral-7b-instruct:free",
+             "openchat/openchat-7b:free"
+         ],
+         "route": "fallback",
+         "max_tokens": 1024,
+         "messages": [
+             {"role": "user", "content": prompt}
+         ]
+     }

      response = requests.post(url, headers=headers, json=body)
-     response_json = response.json()
-     choices = response_json.get('choices', [])
-     if choices:
-         return choices[0].get('message', {}).get('content', 'Default content if key is missing')
-     else:
-         logging.error(f'No choices available in the response: {response_json}')
-         return 'No valid search terms generated'
+     return response.json()['choices'][0]['message']['content']

  async def predict(message, history):
      full_response = ""
@@ -139,18 +124,22 @@ async def predict(message, history):
      full_response += "\nResponse: "

      url = "https://openrouter.ai/api/v1/chat/completions"
-     headers = {"Content-Type": "application/json",
-                "Authorization": f"Bearer {openrouter_key}"}
-     body = {"stream": True,
-             "models": [
-                 "mistralai/mistral-7b-instruct:free",
-                 "openchat/openchat-7b:free"
-             ],
-             "route": "fallback",
-             "max_tokens": 1024,
-             "messages": [
-                 {"role": "user", "content": prompt}
-             ]}
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {openrouter_key}"
+     }
+     body = {
+         "stream": True,
+         "models": [
+             "mistralai/mistral-7b-instruct:free",
+             "openchat/openchat-7b:free"
+         ],
+         "route": "fallback",
+         "max_tokens": 1024,
+         "messages": [
+             {"role": "user", "content": prompt}
+         ]
+     }

      async with aiohttp.ClientSession() as session:
          async with session.post(url, headers=headers, json=body) as response:
@@ -177,7 +166,6 @@ async def predict(message, history):
      except Exception:
          pass

-
  # Define a custom theme with colors similar to the provided image
  custom_theme = gr.themes.Default(
      primary_hue="#6a0dad", # Custom primary color (similar to the purple in the image)
@@ -189,20 +177,16 @@ custom_theme = gr.themes.Default(
      text_color="#f0f0f0" # Custom text color for better contrast
  )

- def predict(message, history):
-     # Your predict function implementation
-     pass# Apply the custom theme to the Gradio interface
-
+ # Apply the custom theme to the Gradio interface
  gr.ChatInterface(
      predict,
-     title="Live Web Chat",
-     description="Ultimate Research Assistant",
+     title="Assistant pro",
      retry_btn=None,
      undo_btn=None,
      examples=[
          'What is the current sentiment of the Brazil election?',
-         'Compare the current economies of China & India',
-         'What are the new design trends in 2024',
+         'Compare the current economies of China and India?',
+         'What are new shoe design trends in 2024',
      ],
      theme=custom_theme # Apply the custom theme
- ).launch()
+ ).launch()
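
The streaming branch of predict is only partially visible in the hunks above: the request body is built, then handed to aiohttp. For readers following the change, a minimal, self-contained sketch of that request pattern is shown below. The model list, "route": "fallback", max_tokens and the OPENROUTER_KEY environment variable are taken from the diff; the line-by-line SSE parsing of the streamed "data:" chunks is an assumption for illustration, not part of the committed code.

import os, json, asyncio, aiohttp

OPENROUTER_URL = "https://openrouter.ai/api/v1/chat/completions"

async def stream_completion(prompt: str) -> str:
    # Sketch only: sends the same kind of streaming request as the committed
    # predict() and concatenates the streamed delta chunks into one string.
    # NOTE: the SSE parsing below is an assumption; the diff only shows the
    # request being issued, not how the stream is consumed.
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ.get('OPENROUTER_KEY')}"
    }
    body = {
        "stream": True,
        "models": [
            "mistralai/mistral-7b-instruct:free",
            "openchat/openchat-7b:free"
        ],
        "route": "fallback",
        "max_tokens": 1024,
        "messages": [{"role": "user", "content": prompt}]
    }
    full = ""
    async with aiohttp.ClientSession() as session:
        async with session.post(OPENROUTER_URL, headers=headers, json=body) as response:
            async for raw_line in response.content:  # aiohttp yields the body line by line
                line = raw_line.decode("utf-8").strip()
                if not line.startswith("data: ") or line == "data: [DONE]":
                    continue  # skip keep-alive comments and the end-of-stream marker
                chunk = json.loads(line[len("data: "):])
                delta = chunk["choices"][0].get("delta", {})
                full += delta.get("content") or ""
    return full

# Example usage: print(asyncio.run(stream_completion("What is the capital of Brazil?")))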