Upload eval.py
eval.py
ADDED
@@ -0,0 +1,398 @@
# This code is modified from the original work available at:
# https://github.com/TIGER-AI-Lab/MMLU-Pro
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Changes made:
# - Updated `eval.py` logic for our dataset.


import os
import json
from tqdm import tqdm
import time
from datasets import load_dataset
import argparse
import pandas as pd
import base64
from PIL import Image
from io import BytesIO
import ast

OPENAI_API_KEY = ""
GEMINI_API_KEY = ""

# The 5-shot examples are taken from MMLU Maths and Physics questions.
ms_prompt = """
Diberikan contoh-contoh berikut:

Soalan: Glukosa diangkut ke dalam sel otot:
Pilihan:
A. melalui pengangkut protein yang dipanggil GLUT4.
B. hanya dengan kehadiran insulin.
C. melalui hexokinase.
D. melalui pengangkut asid monokarbilik.

Jawapan: A

Soalan: Jika sebuah pentagon P dengan bucu-bucu di (–2, –4), (–4, 1), (–1, 4), (2, 4), dan (3, 0) dipantulkan merentasi garis y = x untuk mendapatkan pentagon baru, P’, maka salah satu bucu P’ ialah
Pilihan:
A. (0, –3)
B. (4, 1)
C. (2, 2)
D. (–4, –2)

Jawapan: D

Soalan: John membahagikan pin topi cenderamatanya kepada dua timbunan. Kedua-dua timbunan mempunyai bilangan pin yang sama. Dia memberikan kepada abangnya separuh daripada satu pertiga salah satu timbunan. John mempunyai 66 pin yang tinggal. Berapakah bilangan pin yang John miliki pada asalnya?
Pilihan:
A. 396
B. 72
C. 66
D. 36

Jawapan: B

Soalan: Sebuah sfera pejal (I = 0.06 kg·m^2) berputar bebas mengelilingi paksi melalui pusatnya pada kelajuan sudut 20 rad/s. Dikehendaki menghentikan sfera tersebut dengan menggunakan daya geseran sebesar 2.0 N di permukaan luar sfera, pada jarak 0.30 m dari pusat sfera. Berapa lamakah masa yang diambil untuk menghentikan sfera tersebut?
Pilihan:
A. 4 s
B. 2 s
C. 0.06 s
D. 0.03 s

Jawapan: B

Soalan: Cahaya ultraviolet mempunyai panjang gelombang sekitar 6 × 10^-8 m. Apakah frekuensi cahaya ini?
Pilihan:
A. 5 × 10^15 Hz
B. 0.5 Hz
C. 2 Hz
D. 20 Hz

Jawapan: A

Berikut adalah soalan pilihan berganda. Pilih jawapan yang betul daripada pilihan 'A', 'B', 'C', atau 'D'.
Jawab dengan hanya huruf pilihan yang betul. Jangan berikan sebarang penjelasan atau teks tambahan.
Jawapan hendaklah hanya salah satu daripada ini: 'A', 'B', 'C', 'D'.\n\n
"""

en_prompt = """
Given the examples:

Question: Glucose is transported into the muscle cell:
Choices:
A. via protein transporters called GLUT4.
B. only in the presence of insulin.
C. via hexokinase.
D. via monocarbylic acid transporters.

Answer: A

Question: If a pentagon P with vertices at (– 2, – 4), (– 4, 1), (–1, 4), (2, 4), and (3, 0) is reflected across the line y = x to get a new pentagon, P’, then one of the vertices of P’ is
Choices:
A. (0, – 3)
B. (4, 1)
C. (2, 2)
D. (– 4, –2)

Answer: D

Question: John divided his souvenir hat pins into two piles. The two piles had an equal number of pins. He gave his brother one-half of one-third of one pile. John had 66 pins left. How many pins did John originally have?
Choices:
A. 396
B. 72
C. 66
D. 36

Answer: B

Question: A solid sphere (I = 0.06 kg·m^2) spins freely around an axis through its center at an angular speed of 20 rad/s. It is desired to bring the sphere to rest by applying a friction force of magnitude 2.0 N to the sphere’s outer surface, a distance of 0.30 m from the sphere’s center. How much time will it take the sphere to come to rest?
Choices:
A. 4 s
B. 2 s
C. 0.06 s
D. 0.03 s

Answer: B

Question: Ultraviolet light has a wavelength of about 6 × 10^-8 m. What is the frequency of this light?
Choices:
A. 5 × 10^15 Hz
B. 0.5 Hz
C. 2 Hz
D. 20 Hz

Answer: A

The following are multiple choice questions. Choose the correct answer from the options 'A', 'B', 'C', or 'D'.
Answer with only the letter of the correct option. Do not provide any extra explanation or text.
The answer should only be one of these: 'A', 'B', 'C', 'D'.\n\n
"""

def get_client():
    if args.model_name in ["gpt-4o-mini", "gpt-4o"]:
        import openai
        openai.api_key = OPENAI_API_KEY
        client = openai
    elif args.model_name in ["gemini-2.0-flash-exp", "gemini-1.5-flash"]:
        import google.generativeai as genai
        genai.configure(api_key=GEMINI_API_KEY)
        generation_config = {
            "temperature": 0.0,
            "top_p": 0.1,
            "max_output_tokens": 1,
            "response_mime_type": "text/plain",
        }
        client = genai.GenerativeModel(
            model_name=args.model_name,
            generation_config=generation_config,
        )
    else:
        client = None
        print("For other model API calls, please implement the client definition method yourself.")
    return client

def call_api(client, instruction, inputs):
    start = time.time()
    if args.model_name in ["gpt-4o-mini", "gpt-4o"]:
        message_text = [{"role": "user", "content": instruction + inputs}]
        completion = client.chat.completions.create(
            model=args.model_name,
            messages=message_text,
            temperature=0,
            max_tokens=1,
            top_p=0.1,
        )
        result = completion.choices[0].message.content
    elif args.model_name in ["gemini-2.0-flash-exp", "gemini-1.5-flash"]:
        response = client.generate_content([instruction, inputs])
        result = response.text
    else:
        print("For other model API calls, please implement the request method yourself.")
        result = None
    print("elapsed time:", time.time() - start)
    return result

def call_api_figures(client, instruction, inputs, figures):
    start = time.time()
    if args.model_name in ["gpt-4o-mini", "gpt-4o"]:
        content = [{"type": "text", "text": instruction + inputs}]
        for figure in figures:
            content.append({
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{encode_image(figure)}"}
            })
        message_text = [{"role": "user", "content": content}]
        completion = client.chat.completions.create(
            model=args.model_name,
            messages=message_text,
            temperature=0,
            max_tokens=1,
            top_p=0.1
        )
        result = completion.choices[0].message.content
    elif args.model_name in ["gemini-2.0-flash-exp", "gemini-1.5-flash"]:
        content = [instruction, inputs]
        for figure in figures:
            content.append(figure)
        response = client.generate_content(content)
        result = response.text
    else:
        print("For other model API calls, please implement the request method yourself.")
        result = None
    print("elapsed time:", time.time() - start)
    return result

# Function to encode the image to base64
def encode_image(image):
    # Check if the image is in RGBA mode and convert it to RGB
    if image.mode == "RGBA":
        image = image.convert("RGB")
    buffered = BytesIO()
    image.save(buffered, format="JPEG")  # Save image as JPEG
    return base64.b64encode(buffered.getvalue()).decode("utf-8")  # Return base64 string

def format_question(question_text, options_str, language):
    # Parse the string into a Python list
    options = ast.literal_eval(options_str)
    if language == 'en':
        question = f"Question: {question_text}\nOptions:\n"
        for opt in options:
            question += f"{opt}\n"
        question += "Answer: "
    elif language == 'ms':
        question = f"Soalan: {question_text}\nPilihan:\n"
        for opt in options:
            question += f"{opt}\n"
        question += "Jawapan: "
    return question

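# Illustrative sketch only (values are hypothetical, not taken from the dataset):
# format_question expects `options_str` to be a stringified Python list whose entries
# already carry their letter prefixes, since it prints each option verbatim.
#   format_question("How many pins did John originally have?",
#                   "['A. 396', 'B. 72', 'C. 66', 'D. 36']", "en")
# would return:
#   Question: How many pins did John originally have?
#   Options:
#   A. 396
#   B. 72
#   C. 66
#   D. 36
#   Answer:
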
def single_request(client, single_question, with_figure, language):
    question = single_question["Questions"]
    options = single_question["Options"]

    if language == 'en':
        prompt = en_prompt
    elif language == 'ms':
        prompt = ms_prompt

    input_text = format_question(question, options, language)

    retries = 5
    delay = 15
    attempt = 0

    if with_figure:
        figures_data = single_question["Label"]
        pairs = [entry.strip() for item in figures_data for entry in item.split(",")]
        figure_labels = [{"label": label.strip(), "path": path.strip()} for label, path in [pair.split(":") for pair in pairs]]
        figures = single_question["Figures"]

        prompt += "".join([f"Figure {index}: {figure['label']}\n" for index, figure in enumerate(figure_labels)])

        while attempt < retries:
            try:
                response = call_api_figures(client, prompt, input_text, figures)
                if response:
                    response = response.replace('**', '')
                    return response, response
            except Exception as e:
                print(f"Error: {e}")
            attempt += 1
            if attempt < retries:
                print(f"Retrying in {delay:.2f} seconds...")
                time.sleep(delay)
        return None, f"Failed after {retries} retries."

    else:
        while attempt < retries:
            try:
                response = call_api(client, prompt, input_text)
                if response:
                    response = response.replace('**', '')
                    return response, response
            except Exception as e:
                print(f"Error: {e}")
            attempt += 1
            if attempt < retries:
                print(f"Retrying in {delay:.2f} seconds...")
                time.sleep(delay)
        return None, f"Failed after {retries} retries."

def evaluate(language, with_figure=False):
    client = get_client()

    # Load dataset from Hugging Face
    dataset_name = "Supa-AI/STEM-en-ms"
    dataset = load_dataset(dataset_name, name=f"data_{language}", split="eval")

    # Convert to pandas DataFrame
    data = pd.DataFrame(dataset)

    # Split the dataset into two parts: with figures and without figures
    data_with_figures = data[data["Figures"].apply(lambda x: isinstance(x, list) and len(x) > 0)]
    data_without_figures = data[data["Figures"].apply(lambda x: isinstance(x, list) and len(x) == 0)]

    if with_figure:
        test_data = data_with_figures
        suffix = f"{args.model_name}_{language}_wfigures".split("/", 1)[-1]
    else:
        test_data = data_without_figures
        suffix = f"{args.model_name}_{language}_wofigures".split("/", 1)[-1]

    output_res_path = os.path.join(args.output_dir, suffix + "_result.json")

    total_questions = len(test_data)  # Total includes all questions

    # Load existing results if available
    if os.path.exists(output_res_path):
        with open(output_res_path, "r", encoding="utf-8") as f:
            existing_results = json.load(f)
        processed_ids = {entry["FileName"] for entry in existing_results}
        # Count correct predictions from existing results
        correct_predictions_existing = sum(1 for entry in existing_results if entry.get("pred") == entry.get("Answers"))
    else:
        existing_results = []
        processed_ids = set()
        correct_predictions_existing = 0

    # Filter out already processed entries
    test_data = test_data[~test_data["FileName"].isin(processed_ids)]

    res = existing_results
    correct_predictions_new = 0

    for _, each in tqdm(test_data.iterrows(), total=len(test_data)):
        label = each["Answers"]
        # if len(each["Figures"]) > 1: continue
        pred, response = single_request(client, each, with_figure, language)
        if response is not None:
            each["pred"] = pred
            each["model_outputs"] = response
            if pred is not None and pred == label:
                correct_predictions_new += 1
            res.append(each.to_dict())
            save_res(res, output_res_path)  # Save results incrementally
            print(f"FileName: {each['FileName']}, Answer: {each['Answers']}, Prediction: {each['pred']}")

    # Calculate accuracy
    correct_predictions_total = correct_predictions_existing + correct_predictions_new
    print("Total Questions: ", total_questions)
    print("Correct Predictions (existing): ", correct_predictions_existing)
    print("Correct Predictions (new): ", correct_predictions_new)
    accuracy = correct_predictions_total / total_questions if total_questions > 0 else 0
    print(f"Accuracy: {accuracy:.2%}")

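# Note on evaluate() outputs: with the default arguments the results file is written
# incrementally to a path such as eval_results/gpt-4o_en_wofigures_result.json
# (path shown as an example, derived from the suffix logic above). Re-running the
# same configuration skips FileName entries already present in that file, so an
# interrupted run resumes where it left off.
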
def remove_images_from_res(res):
    """Recursively removes image objects from the result dictionary."""
    if isinstance(res, dict):
        for key, value in res.items():
            if isinstance(value, Image.Image):
                res[key] = "Image is not saved"  # Replace image objects with a placeholder
            elif isinstance(value, (dict, list)):
                remove_images_from_res(value)  # Recursively process nested structures
    elif isinstance(res, list):
        for i in range(len(res)):
            if isinstance(res[i], Image.Image):
                res[i] = "Image is not saved"  # Replace image objects with a placeholder
            elif isinstance(res[i], (dict, list)):
                remove_images_from_res(res[i])  # Recursively process nested structures
    return res

def save_res(res, output_res_path):
    """Save the result to a file, excluding images."""
    os.makedirs(os.path.dirname(output_res_path), exist_ok=True)
    res = remove_images_from_res(res)  # Remove images from the result
    with open(output_res_path, "w", encoding="utf-8") as fo:
        fo.write(json.dumps(res, indent=4, ensure_ascii=False))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir", "-o", type=str, default="eval_results/")
    parser.add_argument("--model_name", "-m", type=str, default="gpt-4o",
                        choices=["gpt-4o-mini", "gpt-4o",  # OPENAI
                                 "gemini-2.0-flash-exp", "gemini-1.5-flash",  # GEMINI
                                 ])
    parser.add_argument("--language", "-l", type=str, default="en", choices=["en", "ms"])
    # Use a store_true flag: argparse's type=bool would treat any non-empty value
    # (including "False") as True.
    parser.add_argument("--with_figures", "-f", action="store_true")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    evaluate(args.language, args.with_figures)
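# Example invocations (a sketch; fill in OPENAI_API_KEY / GEMINI_API_KEY above first):
#   python eval.py --model_name gpt-4o --language en
#   python eval.py -m gemini-1.5-flash -l ms --with_figures -o eval_results/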