Connoriginal committed
Commit 0387210 · Parent(s): d6928b1

add second version

Files changed (3)
  1. app.py +22 -6
  2. chatbot.py +27 -9
  3. requirements.txt +4 -3
app.py CHANGED
@@ -1,3 +1,4 @@
+import openai
 import time
 import yaml
 import gradio as gr
@@ -8,11 +9,14 @@ from chatbot import ChatBot
 
 # MARKDOWN
 MARKDOWN = """
-# Coffee-Gym Feedback Model
-Welcome to the COFFEE-GYM demo page! This page will guide you through using our comprehensive RL environment for training models that provide feedback on code editing.
+# Coffee-Gym
+Welcome to the COFFEE-GYM demo page!
+
+## DS-Coder-7B-PPO-CoffeeEval
+You can try using the COFFEE-GYM feedback model (DS-Coder-7B-PPO-CoffeeEval) to get feedback on your code.
 
 ## Prompt template
-To use the COFFEE-GYM feedback model, you can follow the prompt template below:
+To use the DS-Coder-7B-PPO-CoffeeEvall, you can follow the prompt template below:
 
 ~~~json
 Problem Description:
@@ -33,9 +37,20 @@ Feel free to explore and make the most out of COFFEE-GYM!
 """
 
 
-def main():
+
+
+# get arguments from command line
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model_name", type=str, default="Team-Coffee-Gym/DS-Coder-7B-PPO-CoffeeEval", help="Model name to use for the chatbot.")
+    parser.add_argument("--url", type=str, default="https://editor.jp.ngrok.io/v1", help="URL to use for the chatbot.")
+    args = parser.parse_args()
+    return args
+
+
+def main(args):
 
-    chatbot = ChatBot()
+    chatbot = ChatBot(model_name=args.model_name, url=args.url)
 
     with gr.Blocks() as app:
         ##### Playground #####
@@ -70,4 +85,5 @@ def main():
 
 
 if __name__ == "__main__":
-    main()
+    args = get_args()
+    main(args)
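Taken together, these hunks replace the zero-argument `main()` with an `argparse`-driven entry point that forwards the model name and endpoint URL into `ChatBot`. Below is a minimal runnable sketch of that wiring; the `argparse` import, the Gradio playground body, and the `app.launch()` call are not visible in the hunks and are filled in here as assumptions.

~~~python
# Sketch of the reworked app.py entry point. The argparse import, the Gradio
# playground layout, and the launch call are NOT shown in the diff hunks and
# appear here only as assumptions.
import argparse

import gradio as gr

from chatbot import ChatBot


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str,
                        default="Team-Coffee-Gym/DS-Coder-7B-PPO-CoffeeEval",
                        help="Model name to use for the chatbot.")
    parser.add_argument("--url", type=str,
                        default="https://editor.jp.ngrok.io/v1",
                        help="URL to use for the chatbot.")
    return parser.parse_args()


def main(args):
    # The chatbot now talks to a remote endpoint instead of loading a local
    # transformers pipeline, so it only needs the model name and URL.
    chatbot = ChatBot(model_name=args.model_name, url=args.url)

    with gr.Blocks() as app:
        ##### Playground #####
        gr.Markdown("Playground placeholder")  # real UI omitted from the hunks

    app.launch()  # assumed; the launch code lies outside the shown hunks


if __name__ == "__main__":
    main(get_args())
~~~

With the defaults, `python app.py` targets the ngrok endpoint named in the diff; passing `--url` points the demo at any other OpenAI-compatible server.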
chatbot.py CHANGED
@@ -1,19 +1,36 @@
+import openai
 import time
 import json
 import yaml
 from typing import Union
 import os
-
-
-from transformers import pipeline
+from langchain_openai import OpenAI
+from transformers import AutoTokenizer
+from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
+from langchain_community.chat_models import ChatOpenAI
+from langchain.callbacks import get_openai_callback
 
 
 class ChatBot(object):
-    def __init__(self):
-        self.model = pipe = pipeline("text-generation", model="Team-Coffee-Gym/CoffeeGym")
+    def __init__(self, model_name, url):
+        self.model_name = model_name
+        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+        self.url = url
+        self.model = self._set_model()
         self.chat_history = []
         self.history = self._set_initial_history()
 
+
+    def _set_model(self):
+        return OpenAI(
+            model_name=self.model_name,
+            openai_api_base = f"{self.url}",
+            openai_api_key="EMPTY",
+            temperature = 0.1,
+            top_p = 0.95,
+            max_tokens = 1024,
+            max_retries=3
+        )
 
     def _set_initial_history(self):
         return ["You are an exceptionally intelligent coding assistant developed by DLI lab that consistently delivers accurate and reliable responses to user instructions. If somebody asks you who are you, answer as 'AI programming assistant based on DLI Lab'.\n\n"]
@@ -50,14 +67,14 @@ class ChatBot(object):
 
         self.chat_history = chat_history
 
-        model_input = self.set_model_input(input_text)
-        response = self.model(model_input)
+        model_input_list = self.set_model_input(input_text)
+        model_input = self.tokenizer.apply_chat_template(model_input_list, tokenize=False, add_generation_prompt=True)
+        response = self.model.invoke(model_input)
 
         if response is not None:
             self.history.append(response)
             self.chat_history = self.chat_history + [(input_text, response)]
 
-        self.log_chat()
         return self.chat_history
 
 
@@ -66,7 +83,8 @@ class ChatBot(object):
         self.chat_history = chat_history[:-1]
         self.history = self.history[:-2]
 
-        model_input = self.set_model_input(None)
+        model_input_list = self.set_model_input(None)
+        model_input = self.tokenizer.apply_chat_template(model_input_list, tokenize=False, add_generation_prompt=True)
         response = self.model.invoke(model_input)
 
         if response is not None:
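The substantive change in `ChatBot` is that generation no longer runs locally through `transformers.pipeline`: the tokenizer is kept only to build the prompt with its chat template, and the completion comes back from an OpenAI-compatible server via LangChain's `OpenAI` wrapper. A standalone sketch of that request path is below, assuming a vLLM-style server is reachable at `url` and that `set_model_input` (whose body is outside the shown hunks) yields standard `{"role": ..., "content": ...}` messages.

~~~python
# Standalone sketch of the new generation path: format the chat with the
# model's chat template, then send the prompt to an OpenAI-compatible
# completions endpoint through LangChain's OpenAI wrapper.
# Assumptions: the server URL below is a placeholder (the diff defaults to an
# ngrok URL), and the message list mimics what set_model_input would return.
from langchain_openai import OpenAI
from transformers import AutoTokenizer

model_name = "Team-Coffee-Gym/DS-Coder-7B-PPO-CoffeeEval"
url = "http://localhost:8000/v1"  # placeholder endpoint

tokenizer = AutoTokenizer.from_pretrained(model_name)
llm = OpenAI(
    model_name=model_name,
    openai_api_base=url,
    openai_api_key="EMPTY",   # a local server ignores the key
    temperature=0.1,
    top_p=0.95,
    max_tokens=1024,
    max_retries=3,
)

messages = [
    {"role": "system", "content": "You are an exceptionally intelligent coding assistant."},
    {"role": "user", "content": "Problem Description: ...\n\nGive feedback on my code."},
]

# tokenize=False returns the formatted prompt string;
# add_generation_prompt=True appends the assistant-turn marker.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
response = llm.invoke(prompt)  # plain-text completion from the endpoint
print(response)
~~~

This also lines up with the `requirements.txt` change below: the pinned `transformers`/`gradio` versions give way to unpinned packages plus `langchain` and `langchain_openai`, since the demo process only formats prompts and delegates generation to the remote server.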
requirements.txt CHANGED
@@ -1,4 +1,5 @@
 huggingface_hub==0.23.0
-transformers==4.41.2
-gradio==4.37.2
-gradio-client==1.0.2
+transformers
+gradio
+langchain
+langchain_openai