|
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer |
|
import gradio as gr |
|
import random |
|
|
|
|
|
# Display name (shown in the UI dropdown) -> Hugging Face model id.
# All three are causal LMs usable with the "text-generation" pipeline.
model_options = {
    "distilgpt2": "distilgpt2",
    "GPT-Neo 125M": "EleutherAI/gpt-neo-125M",
    "GPT-Neo 1.3B": "EleutherAI/gpt-neo-1.3B",
}
|
|
|
|
|
# Eagerly build the default pipeline at import time so the first request
# doesn't pay the model-load cost.
# NOTE(review): from_pretrained may download weights on import — confirm this
# is acceptable for the deployment environment.
default_model_name = model_options["GPT-Neo 125M"]
tokenizer = AutoTokenizer.from_pretrained(default_model_name)
model = AutoModelForCausalLM.from_pretrained(default_model_name)
# device=-1 forces CPU inference; `generator` is swapped out later if the
# user selects a different model.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
|
|
|
|
|
# Sample values used to randomize each generated report's header fields.
names = ["WPMPOperator 1", "John Doe", "Ali Khan"]
work_types = ["Routine pump maintenance", "Valve inspection", "Chemical handling"]
locations = ["Pump House 1", "Main Valve Station", "Chemical Storage Area"]
durations = [30, 45, 60]  # observation duration, in minutes
|
|
|
# Checklist items per SOC section. Each generated report draws a random
# status ("Good Practice" / "Deviation" / "NA") for every item listed here.
observations = {
    # Physical plant / equipment conditions.
    "Plant": [
        "Energy sources controlled",
        "Plant well maintained",
        "Leaks/spills contained",
        "Protection from hazards in place",
        "Access/egress clear",
        "Layout and work locations safe",
        "Housekeeping standard high",
    ],
    # Worker behavior and competence.
    "People": [
        "Work location protected",
        "Work positions safe",
        "People competent",
        "Hazards understood",
        "PPE appropriate",
        "Risk to others avoided",
        "Distractions absent",
    ],
    # Procedures and control-of-work compliance.
    "Process": [
        "Procedures valid",
        "Correct procedures used",
        "Control of work standard applied",
        "Control of work requirements understood",
        "Risk documented",
        "Job Safety Analysis participation",
        "Work well organized, systematic",
        "Communication effective",
        "Change managed safely",
    ],
    # Organizational / leadership factors.
    "Performance": [
        "Work pressure does not compromise safety",
        "Safety priority messaged by leadership",
        "Supervision appropriate",
        "Pace appropriate/safe",
        "Safety performance recognized",
    ],
}
|
|
|
|
|
# Canned comments; one is attached at random to every observation whose
# status is not "NA" (see generate_section_observations).
comments_pool = [
    "No major issues observed.",
    "Corrective actions were taken immediately.",
    "Workers followed safety protocols.",
    "Area was well-maintained.",
    "Minor deviations flagged for correction.",
    "Immediate action required for identified hazards.",
]
|
|
|
|
|
def set_seed(seed_value):
    """Seed Python's global PRNG so report randomization is reproducible."""
    random.seed(seed_value)
|
|
|
|
|
def generate_section_observations(section_name):
    """Assign a random status to every checklist item in one SOC section.

    Looks up the items for ``section_name`` in the module-level
    ``observations`` dict. Each item gets a status drawn from
    "Good Practice" / "Deviation" / "NA"; items that are not "NA" also get
    a random comment from ``comments_pool``.

    Returns a list of (observation, status, comment) tuples.
    """
    statuses = ("Good Practice", "Deviation", "NA")
    section = []
    for item in observations[section_name]:
        chosen = random.choice(statuses)
        note = "" if chosen == "NA" else random.choice(comments_pool)
        section.append((item, chosen, note))
    return section
|
|
|
|
|
def generate_soc(model_choice, severity, seed=None):
    """Build a complete SOC report.

    The report is an AI-generated narrative (from the selected model)
    followed by randomized Plant/People/Process/Performance observation
    sections.

    Args:
        model_choice: Display-name key into the module-level ``model_options``.
        severity: Severity level 1-3; any other value falls back to a generic
            "moderate" description.
        seed: Optional seed for reproducible randomization; ``None`` leaves
            the PRNG state untouched.

    Returns:
        str: The assembled report text.
    """
    # Bug fix: the original `if seed:` silently ignored a seed of 0.
    if seed is not None:
        set_seed(seed)

    # Rebuild the cached pipeline only when a different model was requested;
    # loading weights is expensive, so `generator` is kept module-global.
    global generator
    model_name = model_options[model_choice]
    if generator.tokenizer.name_or_path != model_name:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)

    # Randomized header fields for this report.
    observer_name = random.choice(names)
    work_type = random.choice(work_types)
    location = random.choice(locations)
    duration = random.choice(durations)

    severity_levels = {
        1: "minor concerns and deviations.",
        2: "moderate safety concerns requiring attention.",
        3: "serious safety issues needing urgent action.",
    }
    severity_text = severity_levels.get(severity, "moderate safety concerns.")

    prompt = f"""
Generate a Safety Observation and Conversation (SOC) report for the following details:
Observer's Name: {observer_name}
KOC ID No.: [Insert KOC ID here]
Type of Work Observed: {work_type}
Location: {location}
Duration: {duration} minutes
Severity: {severity_text}
Include:
- Key safety conclusions.
- Randomized observations for Plant, People, Process, and Performance sections.
Format output similar to the MyHSSE layout with numbered observations and status.
"""

    # Narrative portion from the language model (max_length includes the prompt).
    report = generator(prompt, max_length=1024, num_return_sequences=1)[0]["generated_text"]

    def format_section(section_title, section_obs):
        """Render one section as a numbered 'observation - status' list.

        Parameter renamed from `observations` so it no longer shadows the
        module-level checklist dict of the same name.
        """
        lines = "\n".join(
            f"{i + 1}. {obs} - {status}\n Comments: {comment}"
            for i, (obs, status, comment) in enumerate(section_obs)
        )
        return f"\n{section_title} Observations:\n{lines}"

    # Append the four randomized observation sections. The sections are
    # generated in the same order as before, so seeded runs keep producing
    # the same output.
    for section in ("Plant", "People", "Process", "Performance"):
        report += format_section(section, generate_section_observations(section))

    return report
|
|
|
|
|
def app_interface(model_choice, severity, seed=None):
    """Gradio entry point; forwards the UI inputs to the report generator."""
    report = generate_soc(model_choice, severity, seed)
    return report
|
|
|
|
|
# --- Gradio UI wiring --------------------------------------------------------
with gr.Blocks() as app:
    gr.Markdown("# AI-Generated Safety Observation and Conversation (SOC) Reports")
    gr.Markdown(
        """
Generate detailed SOC reports in the MyHSSE format using AI assistance.
Customize with multiple AI models, severity levels, and reproducibility using seeds.
"""
    )

    with gr.Row():
        # Default must match the model eagerly loaded at import time.
        model_choice = gr.Dropdown(
            label="Select AI Model",
            choices=list(model_options.keys()),
            value="GPT-Neo 125M",
        )
        severity_slider = gr.Slider(
            label="Severity of SOC Report",
            minimum=1,
            maximum=3,
            step=1,
            value=2,
        )
        # precision=0 constrains input to whole numbers for random.seed.
        seed_input = gr.Number(label="Seed (Optional)", value=None, precision=0)

    output_box = gr.Textbox(
        label="Generated SOC Report",
        placeholder="Your SOC report will appear here...",
        lines=30,
    )

    with gr.Row():
        generate_btn = gr.Button("Generate SOC Report")
        copy_btn = gr.Button("Copy to Clipboard")

    generate_btn.click(app_interface, inputs=[model_choice, severity_slider, seed_input], outputs=output_box)
    # NOTE(review): this identity lambda runs server-side and never reaches the
    # user's clipboard, so the button is effectively a no-op. Consider
    # gr.Textbox(show_copy_button=True) or a client-side js callback — confirm intent.
    copy_btn.click(lambda text: text, inputs=output_box, outputs=None)


# Start the Gradio server (blocking call).
app.launch()