# (removed: file-size / blame-hash / line-number extraction artifacts that
#  were not part of the program and broke Python syntax)
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import random
# Model options: UI display name -> Hugging Face model id.
model_options = {
    "distilgpt2": "distilgpt2",
    "GPT-Neo 125M": "EleutherAI/gpt-neo-125M",
    "GPT-Neo 1.3B": "EleutherAI/gpt-neo-1.3B",
}
# Load the default model (GPT-Neo 125M) once at import time; generate_soc()
# replaces the module-level `generator` later if the user picks another model.
# device=-1 pins the pipeline to CPU.
default_model_name = model_options["GPT-Neo 125M"]
tokenizer = AutoTokenizer.from_pretrained(default_model_name)
model = AutoModelForCausalLM.from_pretrained(default_model_name)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
# Randomized options for report header fields (observer, work, site, minutes).
names = ["WPMPOperator 1", "John Doe", "Ali Khan"]
work_types = ["Routine pump maintenance", "Valve inspection", "Chemical handling"]
locations = ["Pump House 1", "Main Valve Station", "Chemical Storage Area"]
durations = [30, 45, 60]
# Checklist items per SOC section; generate_section_observations() draws from
# these keys ("Plant", "People", "Process", "Performance") to build each report.
observations = {
    "Plant": [
        "Energy sources controlled",
        "Plant well maintained",
        "Leaks/spills contained",
        "Protection from hazards in place",
        "Access/egress clear",
        "Layout and work locations safe",
        "Housekeeping standard high",
    ],
    "People": [
        "Work location protected",
        "Work positions safe",
        "People competent",
        "Hazards understood",
        "PPE appropriate",
        "Risk to others avoided",
        "Distractions absent",
    ],
    "Process": [
        "Procedures valid",
        "Correct procedures used",
        "Control of work standard applied",
        "Control of work requirements understood",
        "Risk documented",
        "Job Safety Analysis participation",
        "Work well organized, systematic",
        "Communication effective",
        "Change managed safely",
    ],
    "Performance": [
        "Work pressure does not compromise safety",
        "Safety priority messaged by leadership",
        "Supervision appropriate",
        "Pace appropriate/safe",
        "Safety performance recognized",
    ],
}
# Random comments attached to any observation whose status is not "NA".
comments_pool = [
    "No major issues observed.",
    "Corrective actions were taken immediately.",
    "Workers followed safety protocols.",
    "Area was well-maintained.",
    "Minor deviations flagged for correction.",
    "Immediate action required for identified hazards.",
]
# Reproducibility helper
def set_seed(seed_value):
    """Seed Python's global ``random`` module so report output is repeatable."""
    random.seed(seed_value)
# Assign a random status (and, unless NA, a random comment) to every
# checklist item of one SOC section.
def generate_section_observations(section_name):
    """Return a list of (observation, status, comment) triples.

    ``section_name`` must be a key of the module-level ``observations`` dict.
    Status is drawn from {"Good Practice", "Deviation", "NA"}; the comment is
    empty for "NA" and sampled from ``comments_pool`` otherwise.
    """
    def _annotate(item):
        # Two RNG draws per item, in the same order as the original loop:
        # first the status, then (only if not NA) the comment.
        status = random.choice(["Good Practice", "Deviation", "NA"])
        note = "" if status == "NA" else random.choice(comments_pool)
        return (item, status, note)

    return [_annotate(item) for item in observations[section_name]]
# AI-based SOC report generation
def generate_soc(model_choice, severity, seed=None):
    """Build a full SOC report: an AI-generated narrative followed by
    randomized Plant/People/Process/Performance observation sections.

    Parameters
    ----------
    model_choice : str
        Key into the module-level ``model_options`` dict.
    severity : int or float
        Severity level 1-3. Gradio sliders may deliver floats (e.g. ``2.0``),
        so the value is coerced to ``int`` before the lookup.
    seed : int or None
        Optional RNG seed for reproducible reports.

    Returns
    -------
    str
        The complete report text.
    """
    # Compare against None, not truthiness: a seed of 0 must still take effect.
    if seed is not None:
        set_seed(seed)

    # Swap the module-level pipeline only when the selection actually changed,
    # so repeated generations with the same model avoid a costly reload.
    global generator
    model_name = model_options[model_choice]
    if generator.tokenizer.name_or_path != model_name:
        new_tokenizer = AutoTokenizer.from_pretrained(model_name)
        new_model = AutoModelForCausalLM.from_pretrained(model_name)
        generator = pipeline(
            "text-generation", model=new_model, tokenizer=new_tokenizer, device=-1
        )

    # Random selections for the report header fields.
    observer_name = random.choice(names)
    work_type = random.choice(work_types)
    location = random.choice(locations)
    duration = random.choice(durations)

    # Severity description. Coerce to int first: a float key such as 2.0 from
    # the UI, or None, would otherwise always fall through to the default.
    severity_levels = {
        1: "minor concerns and deviations.",
        2: "moderate safety concerns requiring attention.",
        3: "serious safety issues needing urgent action.",
    }
    try:
        severity_key = int(severity)
    except (TypeError, ValueError):
        severity_key = None
    severity_text = severity_levels.get(severity_key, "moderate safety concerns.")

    # AI Prompt (content deliberately kept at column 0 inside the literal).
    prompt = f"""
Generate a Safety Observation and Conversation (SOC) report for the following details:
Observer's Name: {observer_name}
KOC ID No.: [Insert KOC ID here]
Type of Work Observed: {work_type}
Location: {location}
Duration: {duration} minutes
Severity: {severity_text}
Include:
- Key safety conclusions.
- Randomized observations for Plant, People, Process, and Performance sections.
Format output similar to the MyHSSE layout with numbered observations and status.
"""

    # Generate the narrative. max_new_tokens bounds only the continuation;
    # the previous max_length=1024 counted prompt tokens too and is deprecated.
    report = generator(prompt, max_new_tokens=512, num_return_sequences=1)[0]["generated_text"]

    def format_section(section_title, entries):
        # Parameter renamed from `observations`, which shadowed the
        # module-level checklist dict.
        lines = [
            f"{i + 1}. {item} - {status}\n   Comments: {comment}"
            for i, (item, status, comment) in enumerate(entries)
        ]
        return f"\n{section_title} Observations:\n" + "\n".join(lines)

    # Append the four randomized observation sections in the fixed order.
    for section in ("Plant", "People", "Process", "Performance"):
        report += format_section(section, generate_section_observations(section))
    return report
# Gradio Interface
def app_interface(model_choice, severity, seed=None):
    """Thin Gradio callback: forward the UI inputs straight to generate_soc."""
    report = generate_soc(model_choice, severity, seed)
    return report
# Gradio Layout
with gr.Blocks() as app:
    gr.Markdown("# AI-Generated Safety Observation and Conversation (SOC) Reports")
    gr.Markdown(
        """
        Generate detailed SOC reports in the MyHSSE format using AI assistance.
        Customize with multiple AI models, severity levels, and reproducibility using seeds.
        """
    )
    # Input controls, side by side.
    with gr.Row():
        model_choice = gr.Dropdown(
            label="Select AI Model",
            choices=list(model_options.keys()),
            value="GPT-Neo 125M",
        )
        severity_slider = gr.Slider(
            label="Severity of SOC Report",
            minimum=1,
            maximum=3,
            step=1,
            value=2,
        )
        seed_input = gr.Number(label="Seed (Optional)", value=None, precision=0)
    output_box = gr.Textbox(
        label="Generated SOC Report",
        placeholder="Your SOC report will appear here...",
        lines=30,
    )
    with gr.Row():
        generate_btn = gr.Button("Generate SOC Report")
        copy_btn = gr.Button("Copy to Clipboard")

    generate_btn.click(
        app_interface,
        inputs=[model_choice, severity_slider, seed_input],
        outputs=output_box,
    )
    # NOTE(review): this identity lambda with outputs=None never reaches the
    # user's clipboard -- clipboard access needs client-side JS (Button.click's
    # `js=` argument) or Textbox(show_copy_button=True). Confirm the installed
    # gradio version before wiring either in; kept as-is to avoid breaking
    # older releases.
    copy_btn.click(lambda text: text, inputs=output_box, outputs=None)

# Launch the app. (A stray trailing "|" extraction artifact after this call
# was removed; it made the file a syntax error.)
app.launch()