# app.py — "Soc" Hugging Face Space: AI-generated Safety Observation and
# Conversation (SOC) reports for a water injection plant.
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import random
import gradio as gr
# Model options: UI display name -> Hugging Face Hub model id.
model_options = {
    "distilgpt2": "distilgpt2",
    "GPT-Neo 125M": "EleutherAI/gpt-neo-125M",
}

# Load default model eagerly at import time so the first request is fast.
default_model_name = model_options["GPT-Neo 125M"]
tokenizer = AutoTokenizer.from_pretrained(default_model_name)
model = AutoModelForCausalLM.from_pretrained(default_model_name)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)  # Use CPU (device=-1)

# Predefined options for randomization of the report's header fields.
names = ["John Doe", "Jane Smith", "Ali Khan"]
locations = ["Pump House 1", "Main Valve Station", "Chemical Storage Area"]
work_types = ["Routine pump maintenance", "Valve inspection", "Chemical handling"]
durations = [30, 45, 60]  # minutes
# NOTE(review): `good_practices` and `deviations` are not referenced anywhere
# in the visible code — candidates for removal, or for use in the prompt.
good_practices = ["Good Practice"]
deviations = ["Deviation"]
# Fixed plant observations; each entry is (observation, classification, remark).
plant_observations = [
    ("Energy sources controlled", "Good Practice", "Lockout/tagout procedures were followed."),
    ("Leaks/spills contained", "Deviation", "Oil spill near a pump flagged for cleanup."),
    ("Housekeeping standard high", "Deviation", "Scattered tools were organized after reminder."),
]
# Function to set seed
def set_seed(seed_value):
    """Seed the RNGs this app uses, for reproducible reports.

    Seeds Python's ``random`` module (which drives the randomized report
    fields) and, on a best-effort basis, torch's global RNG — the original
    only seeded ``random``, so the model's sampled text was not reproducible.

    Parameters
    ----------
    seed_value : int
        Seed passed through to ``random.seed`` and ``torch.manual_seed``.
    """
    random.seed(seed_value)
    try:
        # torch ships with transformers; guard anyway so this module still
        # works where torch is absent.
        import torch
        torch.manual_seed(seed_value)
    except ImportError:
        pass
# AI-based SOC report generation
def generate_soc(model_choice, seed=None):
    """Generate a randomized Safety Observation and Conversation (SOC) report.

    Parameters
    ----------
    model_choice : str
        Key into ``model_options`` selecting which text-generation model to use.
    seed : int | None, optional
        Seed for reproducible randomized fields; ``None`` leaves RNG state alone.

    Returns
    -------
    str
        The prompt plus the model's continuation (the pipeline's
        ``generated_text``).
    """
    # Seed only when a seed was actually supplied. The original `if seed:`
    # check silently ignored the legitimate seed value 0.
    if seed is not None:
        set_seed(seed)

    # Swap the cached pipeline only when a different model is requested;
    # reloading the model on every call would be very slow.
    global generator
    model_name = model_options[model_choice]
    if generator.tokenizer.name_or_path != model_name:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)

    # Randomized report-header fields.
    observer_name = random.choice(names)
    location = random.choice(locations)
    work_type = random.choice(work_types)
    duration = random.choice(durations)

    # Shuffle the fixed plant observations into a numbered list.
    observations = "\n".join(
        f"{i+1}. {obs[0]}\n{obs[1]}\n{obs[2]}"
        for i, obs in enumerate(random.sample(plant_observations, len(plant_observations)))
    )

    # AI Prompt (kept verbatim — it is part of the app's observable output,
    # since the pipeline echoes the prompt back in `generated_text`).
    prompt = f"""
Write a detailed Safety Observation and Conversation (SOC) report for a water injection plant.
Key Safety Conclusions/Comments/Agreements Made:
Briefly summarize safety observations, key concerns, and corrective actions.
Observer's Name: {observer_name}
KOC ID No.: [Insert KOC ID here]
Type of Work Observed: {work_type}
Location: {location}
Duration (in mins): {duration}
--- Plant Observations:
{observations}
--- People Observations:
Include details on PPE compliance, hazard understanding, and good practices or deviations.
--- Process Observations:
Summarize job safety analysis, procedures followed, and improvements needed.
--- Performance Observations:
Evaluate the overall safety performance, including work pace and supervision.
"""
    # NOTE: max_length counts prompt + generated tokens, so the long prompt
    # leaves only part of the 512-token budget for new text.
    result = generator(prompt, max_length=512, num_return_sequences=1)[0]["generated_text"]
    return result
# Gradio Interface
def app_interface(model_choice, seed):
    """Gradio click callback: delegate straight to the report generator."""
    report = generate_soc(model_choice, seed)
    return report
# Gradio Layout: two dropdown/number inputs, a large output textbox, and two
# action buttons, wired together inside a Blocks context.
with gr.Blocks() as app:
    gr.Markdown("# AI-Generated Safety Observation and Conversation (SOC) Reports")
    gr.Markdown(
        """
Generate detailed SOC reports for a water injection plant using AI assistance.
Customize your report with multiple models, randomization, and reproducibility through seeds.
"""
    )
    # Input row: model picker + optional integer seed.
    with gr.Row():
        model_choice = gr.Dropdown(
            label="Select Model",
            choices=list(model_options.keys()),
            value="GPT-Neo 125M",
        )
        seed = gr.Number(label="Seed (Optional)", value=None, precision=0)
    output_box = gr.Textbox(
        label="Generated SOC Report",
        placeholder="Your SOC report will appear here...",
        lines=30,
    )
    with gr.Row():
        generate_btn = gr.Button("Generate SOC Report")
        copy_btn = gr.Button("Copy to Clipboard")
    generate_btn.click(app_interface, inputs=[model_choice, seed], outputs=output_box)
    # NOTE(review): this handler is a server-side identity function with no
    # outputs — it never touches the browser clipboard, so the button is a
    # no-op. Consider gr.Textbox(..., show_copy_button=True) instead.
    copy_btn.click(lambda text: text, inputs=output_box, outputs=None)

# Launch the app
app.launch()