Oranblock committed on
Commit f1e9568 · verified · 1 Parent(s): 3715759

Update app.py

Files changed (1)
  1. app.py +101 -30
app.py CHANGED
@@ -2,7 +2,7 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 import random
 
-# Predefined model options
+# Model options
 model_options = {
     "distilgpt2": "distilgpt2",
     "GPT-Neo 125M": "EleutherAI/gpt-neo-125M",
@@ -13,24 +13,82 @@ model_options = {
 default_model_name = model_options["GPT-Neo 125M"]
 tokenizer = AutoTokenizer.from_pretrained(default_model_name)
 model = AutoModelForCausalLM.from_pretrained(default_model_name)
-generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)  # Use CPU
+generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
 
-# Random options for observations
+# Randomized options for report fields
 names = ["WPMPOperator 1", "John Doe", "Ali Khan"]
 work_types = ["Routine pump maintenance", "Valve inspection", "Chemical handling"]
-locations = ["Water Injection Plant - Pump House 2", "Main Valve Station", "Chemical Storage Area"]
+locations = ["Pump House 1", "Main Valve Station", "Chemical Storage Area"]
 durations = [30, 45, 60]
 
-# Function to set seed
+observations = {
+    "Plant": [
+        "Energy sources controlled",
+        "Plant well maintained",
+        "Leaks/spills contained",
+        "Protection from hazards in place",
+        "Access/egress clear",
+        "Layout and work locations safe",
+        "Housekeeping standard high",
+    ],
+    "People": [
+        "Work location protected",
+        "Work positions safe",
+        "People competent",
+        "Hazards understood",
+        "PPE appropriate",
+        "Risk to others avoided",
+        "Distractions absent",
+    ],
+    "Process": [
+        "Procedures valid",
+        "Correct procedures used",
+        "Control of work standard applied",
+        "Control of work requirements understood",
+        "Risk documented",
+        "Job Safety Analysis participation",
+        "Work well organized, systematic",
+        "Communication effective",
+        "Change managed safely",
+    ],
+    "Performance": [
+        "Work pressure does not compromise safety",
+        "Safety priority messaged by leadership",
+        "Supervision appropriate",
+        "Pace appropriate/safe",
+        "Safety performance recognized",
+    ],
+}
+
+# Random comments
+comments_pool = [
+    "No major issues observed.",
+    "Corrective actions were taken immediately.",
+    "Workers followed safety protocols.",
+    "Area was well-maintained.",
+    "Minor deviations flagged for correction.",
+    "Immediate action required for identified hazards.",
+]
+
+# Set random seed
 def set_seed(seed_value):
     random.seed(seed_value)
 
+# Generate observations with statuses
+def generate_section_observations(section_name):
+    results = []
+    for obs in observations[section_name]:
+        status = random.choice(["Good Practice", "Deviation", "NA"])
+        comment = random.choice(comments_pool) if status != "NA" else ""
+        results.append((obs, status, comment))
+    return results
+
 # AI-based SOC report generation
 def generate_soc(model_choice, severity, seed=None):
     if seed:
         set_seed(seed)
 
-    # Update generator if model choice changes
+    # Update model if selection changes
     global generator
     model_name = model_options[model_choice]
     if generator.tokenizer.name_or_path != model_name:
@@ -38,45 +96,58 @@ def generate_soc(model_choice, severity, seed=None):
         model = AutoModelForCausalLM.from_pretrained(model_name)
         generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
 
-    # Random selections for the fields
+    # Random selections for fields
     observer_name = random.choice(names)
     work_type = random.choice(work_types)
     location = random.choice(locations)
     duration = random.choice(durations)
 
-    # Adjust tone based on severity slider
-    severity_description = {
-        1: "minor concerns and deviations were observed.",
-        2: "moderate concerns requiring immediate attention were identified.",
-        3: "serious safety concerns were flagged for urgent corrective action."
+    # Severity description
+    severity_levels = {
+        1: "minor concerns and deviations.",
+        2: "moderate safety concerns requiring attention.",
+        3: "serious safety issues needing urgent action.",
     }
-    severity_text = severity_description.get(severity, "moderate concerns requiring attention.")
+    severity_text = severity_levels.get(severity, "moderate safety concerns.")
 
-    # AI prompt
+    # AI Prompt
     prompt = f"""
-    Write a detailed Safety Observation and Conversation (SOC) report for a water injection plant with the following details:
-
     Observer's Name: {observer_name}
     KOC ID No.: [Insert KOC ID here]
     Type of Work Observed: {work_type}
     Location: {location}
     Duration: {duration} minutes
-
-    Severity Level: {severity_text}
-
-    The report should include:
-    - Key safety conclusions, concerns, and corrective actions taken.
-    - Plant observations (e.g., energy control, housekeeping) marked as Good Practice or Deviation with comments.
-    - People observations (e.g., PPE compliance, hazard understanding).
-    - Process observations (e.g., job safety analysis, procedures).
-    - Performance observations (e.g., pace, supervision, and safety prioritization).
-
-    Format the output neatly in sections, and ensure it is professional and actionable.
-    """
+    Generate a Safety Observation and Conversation (SOC) report for the following details:
+    Severity: {severity_text}
+    Include:
+    - Key safety conclusions.
+    - Randomized observations for Plant, People, Process, and Performance sections.
+    Format output similar to the MyHSSE layout with numbered observations and status.
+    """
 
-    # Generate report using the selected model
-    result = generator(prompt, max_length=1024, num_return_sequences=1)[0]["generated_text"]
-    return result
+    # Generate the main report body
+    report = generator(prompt, max_length=1024, num_return_sequences=1)[0]["generated_text"]
+
+    # Random observations
+    plant_obs = generate_section_observations("Plant")
+    people_obs = generate_section_observations("People")
+    process_obs = generate_section_observations("Process")
+    performance_obs = generate_section_observations("Performance")
+
+    def format_section(section_title, observations):
+        formatted = f"\n{section_title} Observations:\n"
+        formatted += "\n".join(
+            f"{i+1}. {obs[0]} - {obs[1]}\n Comments: {obs[2]}" for i, obs in enumerate(observations)
+        )
+        return formatted
+
+    # Append formatted sections
+    report += format_section("Plant", plant_obs)
+    report += format_section("People", people_obs)
+    report += format_section("Process", process_obs)
+    report += format_section("Performance", performance_obs)
+
+    return report
 
 # Gradio Interface
 def app_interface(model_choice, severity, seed=None):
@@ -87,8 +158,8 @@ with gr.Blocks() as app:
     gr.Markdown("# AI-Generated Safety Observation and Conversation (SOC) Reports")
     gr.Markdown(
         """
-        Generate detailed SOC reports for a water injection plant using AI assistance.
-        Customize your report with multiple AI models, severity levels, and reproducibility using seeds.
+        Generate detailed SOC reports in the MyHSSE format using AI assistance.
+        Customize with multiple AI models, severity levels, and reproducibility using seeds.
         """
     )
 
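Reviewer note: the snippet below is a minimal standalone sketch of the per-section formatting this commit introduces. It copies the status/comment logic of generate_section_observations and format_section from the updated app.py, but uses a deliberately trimmed observation list, a trimmed comment pool, and an arbitrary seed value (42), so the appended report layout can be previewed without downloading a model; it is not part of the commit itself.

import random

# Trimmed copy of the committed checklist and comment pool (illustrative subset only)
observations = {
    "Plant": [
        "Energy sources controlled",
        "Housekeeping standard high",
    ],
}
comments_pool = [
    "No major issues observed.",
    "Minor deviations flagged for correction.",
]

def generate_section_observations(section_name):
    # Same logic as the committed helper: pick a status, add a comment unless NA
    results = []
    for obs in observations[section_name]:
        status = random.choice(["Good Practice", "Deviation", "NA"])
        comment = random.choice(comments_pool) if status != "NA" else ""
        results.append((obs, status, comment))
    return results

def format_section(section_title, observations):
    # Numbered "observation - status" lines, as in the committed formatter
    formatted = f"\n{section_title} Observations:\n"
    formatted += "\n".join(
        f"{i+1}. {obs[0]} - {obs[1]}\n Comments: {obs[2]}"
        for i, obs in enumerate(observations)
    )
    return formatted

random.seed(42)  # illustrative seed; the app exposes this through its seed input
print(format_section("Plant", generate_section_observations("Plant")))

Because set_seed in app.py only seeds Python's random module, the field selections, checklist statuses, and comments are reproducible for a given seed, while any sampling performed inside the transformers pipeline is not affected by that seed.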