|
import json |
|
import os |
|
from typing import Dict, List |
|
from datetime import datetime |
|
|
|
def get_model_names() -> List[str]:
    """Discover model names from evaluation_results.* sub-directories.

    Scans the current working directory for directories named
    ``evaluation_results.<model>`` and returns the ``<model>`` parts
    in alphabetical order.
    """
    prefix = 'evaluation_results.'
    found = [
        entry.replace(prefix, '')
        for entry in os.listdir('.')
        if entry.startswith(prefix) and os.path.isdir(entry)
    ]
    return sorted(found)
|
|
|
def load_report(model_name: str) -> Dict:
    """Load a model's evaluation report from disk.

    Args:
        model_name: Suffix of the ``evaluation_results.<model_name>``
            results directory.

    Returns:
        The parsed contents of that directory's evaluation_report.json.
    """
    path = f"evaluation_results.{model_name}/evaluation_report.json"
    with open(path, mode='r', encoding='utf-8') as handle:
        report = json.load(handle)
    return report
|
|
|
def format_percentage(value: float) -> str:
    """Render a 0-1 fraction as a percentage string with two decimals."""
    percent = value * 100
    return "%.2f%%" % percent
|
|
|
def _dimension_rows(reports: Dict[str, Dict], section: str, key) -> List[tuple]:
    """Collect (model, metrics) pairs for one value of a breakdown section.

    Models whose report lacks that value are skipped; results are sorted
    by accuracy, best first.
    """
    rows = [
        (model, data[section][key])
        for model, data in reports.items()
        if key in data[section]
    ]
    rows.sort(key=lambda item: item[1]['accuracy'], reverse=True)
    return rows


def _append_metric_table_rows(report: List[str], rows, label: str) -> None:
    """Append one markdown table row per (model, metrics) pair.

    Ends with the group separator row the original layout emits after
    each group.
    """
    for model, metrics in rows:
        report.append(
            f"| {model} | {label} | {metrics['total']} | {metrics['correct']} "
            f"| {format_percentage(metrics['accuracy'])} |"
        )
    report.append("| --- | --- | --- | --- | --- |")


def generate_markdown_report():
    """Build evaluation_report.md comparing every evaluated model.

    Loads evaluation_report.json from each evaluation_results.<model>
    directory, renders comparison tables (overall, by question type, by
    difficulty, by dynasty) plus a conclusion section, and writes the
    result to evaluation_report.md in the current directory.
    """
    models = get_model_names()
    reports = {model: load_report(model) for model in models}

    report: List[str] = []
    report.append("# 中国古诗词大模型评测报告")
    report.append(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
    report.append(f"评测模型: {', '.join(models)}\n")

    # Union of question types across all models, computed once and reused
    # (the original rebuilt this exact set a second time before section 2).
    all_types = sorted({
        q_type
        for data in reports.values()
        for q_type in data['by_type']
    })
    difficulty_order = ['easy', 'medium', 'hard']

    # -- 1. overall comparison -------------------------------------------
    report.append("## 1. 整体表现对比")
    headers = ["模型"] + all_types + ["总计", "总正确率"]
    report.append("\n| " + " | ".join(headers) + " |")
    report.append("| " + " | ".join(["----" for _ in headers]) + " |")

    sorted_models = sorted(reports.items(),
                           key=lambda x: x[1]['overall']['accuracy'],
                           reverse=True)
    for model, data in sorted_models:
        row = [model]
        for q_type in all_types:
            metrics = data['by_type'].get(q_type)
            # Models that never saw this question type get a 0/0 cell.
            row.append(f"{metrics['correct']}/{metrics['total']}" if metrics else "0/0")
        overall = data['overall']
        row.append(f"{overall['correct']}/{overall['total']}")
        row.append(format_percentage(overall['accuracy']))
        report.append("| " + " | ".join(row) + " |")
    report.append("")

    # -- 2. by question type ---------------------------------------------
    report.append("## 2. 题型分类表现")
    report.append("\n| 模型 | 题型 | 总题数 | 正确数 | 准确率 |")
    report.append("| --- | --- | --- | --- | --- |")
    for q_type in all_types:
        _append_metric_table_rows(
            report, _dimension_rows(reports, 'by_type', q_type), q_type)
    report.append("")

    # -- 3. by difficulty ------------------------------------------------
    report.append("## 3. 难度分布表现")
    report.append("\n| 模型 | 难度 | 总题数 | 正确数 | 准确率 |")
    report.append("| --- | --- | --- | --- | --- |")
    for diff in difficulty_order:
        _append_metric_table_rows(
            report, _dimension_rows(reports, 'by_difficulty', diff), diff)
    report.append("")

    # -- 4. by dynasty ----------------------------------------------------
    report.append("## 4. 朝代分布表现")
    report.append("\n| 模型 | 朝代 | 总题数 | 正确数 | 准确率 |")
    report.append("| --- | --- | --- | --- | --- |")
    # A report may key unknown dynasties as None; display those as "未知".
    all_dynasties = sorted({
        dynasty if dynasty else "未知"
        for data in reports.values()
        for dynasty in data['by_dynasty']
    })
    for dynasty in all_dynasties:
        rows = []
        for model, data in reports.items():
            # Fix: look up whichever key (literal label or None) the report
            # actually uses. The original forced the key to None whenever
            # the label was "未知" and raised KeyError if the report stored
            # the literal string "未知" instead.
            if dynasty in data['by_dynasty']:
                rows.append((model, data['by_dynasty'][dynasty]))
            elif dynasty == "未知" and None in data['by_dynasty']:
                rows.append((model, data['by_dynasty'][None]))
        rows.sort(key=lambda item: item[1]['accuracy'], reverse=True)
        _append_metric_table_rows(report, rows, dynasty)
    report.append("")

    # -- 5. conclusions ---------------------------------------------------
    report.append("## 5. 结论分析")
    report.append("\n### 5.1 整体表现")

    # Guard: max() on an empty dict would raise ValueError when no
    # evaluation_results.* directories exist.
    if reports:
        best_model = max(reports.items(),
                         key=lambda x: x[1]['overall']['accuracy'])
        report.append(
            f"- 最佳表现模型: {best_model[0]}, "
            f"整体准确率 {format_percentage(best_model[1]['overall']['accuracy'])}"
        )

        report.append("\n### 5.2 分维度最佳表现")

        report.append("\n#### 题型维度:")
        for q_type in all_types:
            # Fix: models missing a question type rank with accuracy -1
            # instead of raising KeyError (section 1 already tolerates
            # missing types with a "0/0" cell).
            best = max(
                reports.items(),
                key=lambda x: x[1]['by_type'].get(q_type, {}).get('accuracy', -1.0))
            metrics = best[1]['by_type'].get(q_type)
            if metrics is not None:
                report.append(
                    f"- {q_type}: {best[0]} ({format_percentage(metrics['accuracy'])})")

        report.append("\n#### 难度维度:")
        for diff in difficulty_order:
            # Same KeyError fix as above for models missing a difficulty.
            best = max(
                reports.items(),
                key=lambda x: x[1]['by_difficulty'].get(diff, {}).get('accuracy', -1.0))
            metrics = best[1]['by_difficulty'].get(diff)
            if metrics is not None:
                report.append(
                    f"- {diff}: {best[0]} ({format_percentage(metrics['accuracy'])})")

    with open('evaluation_report.md', 'w', encoding='utf-8') as f:
        f.write('\n'.join(report))
|
|
|
# Script entry point: regenerate evaluation_report.md from the
# evaluation_results.* directories in the current working directory.
if __name__ == '__main__':

    generate_markdown_report()