import json
import os
from typing import Dict, List
from datetime import datetime

def get_model_names() -> List[str]:
    """Collect model names from the evaluation_results.<model> directories."""
    model_names = []
    for item in os.listdir('.'):
        if item.startswith('evaluation_results.') and os.path.isdir(item):
            model_name = item.replace('evaluation_results.', '')
            model_names.append(model_name)
    return sorted(model_names)  # sort to keep the order stable across runs
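
# For illustration only: a hypothetical directory layout this scan would pick up.
# A folder named "evaluation_results.my-model" (the model name is made up here)
# would be reported under the model name "my-model":
#
#   .
#   ├── evaluation_results.my-model/
#   │   └── evaluation_report.json
#   └── evaluation_results.another-model/
#       └── evaluation_report.json
#
# get_model_names() would then return ["another-model", "my-model"].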

def load_report(model_name: str) -> Dict:
    """Load the evaluation report JSON for a single model."""
    report_path = f"evaluation_results.{model_name}/evaluation_report.json"
    with open(report_path, 'r', encoding='utf-8') as f:
        return json.load(f)
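
# Sketch of the evaluation_report.json structure this script assumes, inferred
# from the field accesses below (the field names come from the code; the concrete
# values here are made up for illustration):
#
# {
#   "overall":       {"total": 100, "correct": 80, "accuracy": 0.80},
#   "by_type":       {"<question type>": {"total": ..., "correct": ..., "accuracy": ...}},
#   "by_difficulty": {"easy": {...}, "medium": {...}, "hard": {...}},
#   "by_dynasty":    {"<dynasty>": {...}}
# }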

def format_percentage(value: float) -> str:
    """Format a 0-1 ratio as a percentage string, e.g. 0.8123 -> '81.23%'."""
    return f"{value*100:.2f}%"

def generate_markdown_report():
    """Generate the evaluation report in Markdown format."""
    # Discover the evaluated models dynamically
    models = get_model_names()
    # Load every model's report
    reports = {model: load_report(model) for model in models}
    # Accumulate the report line by line
    report = []
    # Title and generation time
    report.append("# 中国古诗词大模型评测报告")
    report.append(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
    # List the evaluated models
    report.append(f"评测模型: {', '.join(models)}\n")
    # 1. Overall comparison
    report.append("## 1. 整体表现对比")
    # Collect every question type that appears in any report
    all_types = sorted(set(
        q_type
        for data in reports.values()
        for q_type in data['by_type'].keys()
    ))
    # Table header
    headers = ["模型"] + all_types + ["总计", "总正确率"]
    report.append("\n| " + " | ".join(headers) + " |")
    report.append("| " + " | ".join(["----" for _ in headers]) + " |")
    # Sort the models by overall accuracy, best first
    sorted_models = sorted(reports.items(),
                           key=lambda x: x[1]['overall']['accuracy'],
                           reverse=True)
    # One data row per model
    for model, data in sorted_models:
        row = [model]
        # Per-question-type counts
        for q_type in all_types:
            if q_type in data['by_type']:
                metrics = data['by_type'][q_type]
                row.append(f"{metrics['correct']}/{metrics['total']}")
            else:
                row.append("0/0")
        # Totals and overall accuracy
        overall = data['overall']
        row.append(f"{overall['correct']}/{overall['total']}")
        row.append(format_percentage(overall['accuracy']))
        report.append("| " + " | ".join(row) + " |")
    report.append("")
    # 2. Breakdown by question type
    report.append("## 2. 题型分类表现")
    report.append("\n| 模型 | 题型 | 总题数 | 正确数 | 准确率 |")
    report.append("| --- | --- | --- | --- | --- |")
    # Collect all question types (same set as in section 1)
    all_types = sorted(set(
        q_type
        for data in reports.values()
        for q_type in data['by_type'].keys()
    ))
    for q_type in all_types:
        type_results = []
        for model, data in reports.items():
            if q_type in data['by_type']:
                metrics = data['by_type'][q_type]
                type_results.append((model, metrics))
        # Sort by accuracy, best first
        sorted_results = sorted(type_results,
                                key=lambda x: x[1]['accuracy'],
                                reverse=True)
        for model, metrics in sorted_results:
            report.append(f"| {model} | {q_type} | {metrics['total']} | {metrics['correct']} | {format_percentage(metrics['accuracy'])} |")
        report.append("| --- | --- | --- | --- | --- |")  # visual divider between question-type groups
    report.append("")
    # 3. Breakdown by difficulty
    report.append("## 3. 难度分布表现")
    report.append("\n| 模型 | 难度 | 总题数 | 正确数 | 准确率 |")
    report.append("| --- | --- | --- | --- | --- |")
    # Fixed difficulty order
    difficulty_order = ['easy', 'medium', 'hard']
    for diff in difficulty_order:
        diff_results = []
        for model, data in reports.items():
            if diff in data['by_difficulty']:
                metrics = data['by_difficulty'][diff]
                diff_results.append((model, metrics))
        # Sort by accuracy, best first
        sorted_results = sorted(diff_results,
                                key=lambda x: x[1]['accuracy'],
                                reverse=True)
        for model, metrics in sorted_results:
            report.append(f"| {model} | {diff} | {metrics['total']} | {metrics['correct']} | {format_percentage(metrics['accuracy'])} |")
        report.append("| --- | --- | --- | --- | --- |")  # visual divider between difficulty levels
    report.append("")
    # 4. Breakdown by dynasty
    report.append("## 4. 朝代分布表现")
    report.append("\n| 模型 | 朝代 | 总题数 | 正确数 | 准确率 |")
    report.append("| --- | --- | --- | --- | --- |")
    # Collect and sort all dynasties; json.load only yields string keys, so a
    # missing dynasty appears as an empty string, displayed here as "未知".
    all_dynasties = sorted(set(
        dynasty if dynasty else "未知"
        for data in reports.values()
        for dynasty in data['by_dynasty'].keys()
    ))
    for dynasty in all_dynasties:
        dynasty_results = []
        for model, data in reports.items():
            if dynasty in data['by_dynasty'] or (dynasty == "未知" and "" in data['by_dynasty']):
                metrics = data['by_dynasty'][dynasty if dynasty in data['by_dynasty'] else ""]
                dynasty_results.append((model, metrics))
        # Sort by accuracy, best first
        sorted_results = sorted(dynasty_results,
                                key=lambda x: x[1]['accuracy'],
                                reverse=True)
        for model, metrics in sorted_results:
            report.append(f"| {model} | {dynasty} | {metrics['total']} | {metrics['correct']} | {format_percentage(metrics['accuracy'])} |")
        report.append("| --- | --- | --- | --- | --- |")  # visual divider between dynasties
    report.append("")
    # 5. Conclusions
    report.append("## 5. 结论分析")
    report.append("\n### 5.1 整体表现")
    # Best-performing model overall
    best_model = max(reports.items(), key=lambda x: x[1]['overall']['accuracy'])
    report.append(f"- 最佳表现模型: {best_model[0]}, 整体准确率 {format_percentage(best_model[1]['overall']['accuracy'])}")
    # Best-performing model per dimension
    report.append("\n### 5.2 分维度最佳表现")
    # By question type (missing entries default to 0 accuracy to avoid KeyError)
    report.append("\n#### 题型维度:")
    for q_type in all_types:
        best = max(reports.items(),
                   key=lambda x: x[1]['by_type'].get(q_type, {}).get('accuracy', 0.0))
        best_acc = best[1]['by_type'].get(q_type, {}).get('accuracy', 0.0)
        report.append(f"- {q_type}: {best[0]} ({format_percentage(best_acc)})")
    # By difficulty (same defaulting as above)
    report.append("\n#### 难度维度:")
    for diff in difficulty_order:
        best = max(reports.items(),
                   key=lambda x: x[1]['by_difficulty'].get(diff, {}).get('accuracy', 0.0))
        best_acc = best[1]['by_difficulty'].get(diff, {}).get('accuracy', 0.0)
        report.append(f"- {diff}: {best[0]} ({format_percentage(best_acc)})")
    # Write the report to disk
    with open('evaluation_report.md', 'w', encoding='utf-8') as f:
        f.write('\n'.join(report))

if __name__ == '__main__':
    generate_markdown_report()
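
# Example usage (assuming this script is saved as, say, generate_report.py and
# run from the directory that contains the evaluation_results.<model> folders):
#
#   python generate_report.py
#
# The script then writes evaluation_report.md next to those folders.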