vakodiya committed on
Commit
67e2cbc
·
1 Parent(s): c6545c6

Streamlit app using LangChain and the GPT-2 model

Browse files
Files changed (2) hide show
  1. app.py +26 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from langchain.prompts import PromptTemplate


@st.cache_resource
def _load_gpt2():
    """Load the GPT-2 tokenizer and model once; Streamlit caches the pair
    across reruns instead of re-reading the weights on every interaction."""
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')
    return tokenizer, model


tokenizer, model = _load_gpt2()

st.title("GPT 2 Chat Bot?")
input_text = st.text_area("Enter text to classify:")


if st.button("""Enter >>>>> """):
    if input_text:
        # NOTE: `output_variables` is not a PromptTemplate field — passing it
        # raises a pydantic validation error, so only the supported
        # arguments are given here.
        prompt_template = PromptTemplate(
            template="Answer the following question and classify it: {question}",
            input_variables=["question"],
        )
        format_prompt = prompt_template.format(question=input_text)
        encoded_input = tokenizer(format_prompt, return_tensors='pt')
        # generate() is required for autoregressive text generation
        # (a plain forward pass would only return logits).
        output = model.generate(**encoded_input, max_length=100)
        # Decode to text, dropping special tokens such as <|endoftext|>.
        decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
        # The original indexed [1] unconditionally, which raises IndexError
        # when the model emits no blank-line separator; fall back to the
        # full decoded text in that case.
        parts = decoded_output.split('\n\n')
        st.write(parts[1] if len(parts) > 1 else parts[0])
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ langchain==0.2.5
3
+ langchain-community==0.2.5
4
+ python-multipart==0.0.9
5
+ transformers==4.41.2
6
+ torch==2.3.1
7
+ tensorflow==2.16.2