import argparse
import json
import os
import time
from collections import defaultdict
from typing import Dict, List

import google.generativeai as genai
import openai
from tqdm import tqdm


class PoetryEvaluator:

    def __init__(self, api_key: str, provider: str = "openai", model: str = "gpt-3.5-turbo",
                 dry_run: bool = False, delay: float = 0.5, max_retries: int = 3, retry_delay: float = 1.0):
        """Initialize the evaluator.

        Args:
            api_key: API key.
            provider: API provider ("openai" or "google").
            model: Model name.
            dry_run: If True, do not call the API; return mock answers instead.
            delay: Delay between API calls, in seconds.
            max_retries: Maximum number of retries per API call.
            retry_delay: Base wait time between retries, in seconds.
        """
        self.api_key = api_key
        self.provider = provider
        self.model = model
        self.dry_run = dry_run
        self.delay = delay
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        # Results are indexed under several keys at once ('all', question type,
        # difficulty, dynasty) so per-category metrics can be computed later.
        self.results = defaultdict(list)

        if provider == "google":
            genai.configure(api_key=api_key)
            self.generation_config = {
                "temperature": 0,
                "top_p": 0.95,
                "top_k": 64,
                "max_output_tokens": 8192,
            }
            self.google_model = genai.GenerativeModel(
                model_name=model,
                generation_config=self.generation_config
            )
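    # Illustrative construction (the Gemini model name below is an assumption,
    # not taken from this file):
    #   PoetryEvaluator(api_key="...", provider="google", model="gemini-1.5-flash")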

    def load_benchmark(self, jsonl_path: str) -> List[Dict]:
        """Load the benchmark dataset from a JSONL file (one question per line)."""
        questions = []
        with open(jsonl_path, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    questions.append(json.loads(line))
        return questions
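    # Illustrative benchmark record, inferred from the fields accessed in this class;
    # the values are made up and real records may carry additional fields:
    # {"id": "q_001", "type": "blank_filling", "difficulty": "easy",
    #  "metadata": {"author": "李白", "dynasty": "唐"},
    #  "content": {"question": "床前明月光,( )。", "answer": "疑是地上霜"}}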

    def generate_prompt(self, question: Dict) -> str:
        """Build the prompt for a question according to its type."""
        q_type = question['type']
        author = question['metadata']['author']
        content = question['content']['question']

        # Task instructions stay in Chinese: they are sent to the model verbatim
        # and the benchmark itself is in Chinese.
        prompts = {
            'couplet': "请将下面的诗句补充完整。只需要回答括号()之内的内容,不需要进行解释。\n\n",
            'hint_words': "请将以下包含星号的诗句补充完整。回答补充后的完整诗句,不需要进行解释。\n\n",
            'find_poetry': "请从以下给出的多行诗句中,从每一行提取出一个字,组成一句有效的诗句。只需要回答找到的诗句,不需要进行解释。\n\n",
            'blank_filling': "请将下面的诗句补充完整。只需要回答括号()之内的内容,不需要进行解释。\n\n",
            'first_last_words': "请将以下包含星号的诗句补充完整。回答补充后的完整诗句,不需要进行解释。\n\n"
        }
        if author is not None:
            return prompts[q_type] + f"{author}: {content}"
        else:
            return prompts[q_type] + content
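    # For example (illustrative), a 'blank_filling' question attributed to 李白 with
    # question "床前明月光,( )。" would produce:
    #   "请将下面的诗句补充完整。只需要回答括号()之内的内容,不需要进行解释。\n\n李白: 床前明月光,( )。"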

    def normalize_answer(self, answer: str) -> str:
        """Normalize an answer: strip whitespace and drop full stops and commas (full-width or ASCII)."""
        return answer.strip().replace('。', '').replace('，', '').replace(',', '')

    def evaluate_answer(self, prediction: str, ground_truth: str) -> bool:
        """Judge a prediction as correct if it contains the ground-truth answer."""
        pred = self.normalize_answer(prediction)
        truth = self.normalize_answer(ground_truth)

        # A ground truth may list several acceptable answers separated by '/';
        # any one of them appearing in the prediction counts as correct.
        if '/' in truth:
            possible_answers = [ans.strip() for ans in truth.split('/')]
            return any(ans in pred for ans in possible_answers)

        return truth in pred
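    # Worked examples of the rule above (illustrative values):
    #   evaluate_answer("答案是:疑是地上霜。", "疑是地上霜")      -> True  (prediction contains the answer)
    #   evaluate_answer("低头思故乡", "疑是地上霜/低头思故乡")    -> True  (either '/'-separated alternative counts)
    #   evaluate_answer("举头望明月", "疑是地上霜")               -> False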

    def call_api_with_retry(self, prompt: str) -> str:
        """Call the model API, retrying on rate limits.

        Args:
            prompt: The prompt to send.
        Returns:
            The model's reply text.
        """
        retries = 0
        while True:
            try:
                if self.provider == "openai":
                    # openai>=1.0 client; the base URL comes from the module-level
                    # setting configured in __main__ (None means the default endpoint).
                    client = openai.OpenAI(
                        api_key=self.api_key,
                        base_url=openai.base_url
                    )

                    response = client.chat.completions.create(
                        model=self.model,
                        messages=[
                            {"role": "system", "content": "你是一个古诗词专家。"},
                            {"role": "user", "content": prompt}
                        ],
                        temperature=0
                    )
                    return response.choices[0].message.content

                elif self.provider == "google":
                    # Gemini chats have no system role here, so the system instruction
                    # is passed as the first user turn of the chat history.
                    chat = self.google_model.start_chat(
                        history=[
                            {"role": "user", "parts": ["你是一个古诗词专家。"]}
                        ]
                    )
                    response = chat.send_message(prompt)
                    return response.text

            except Exception as e:
                retries += 1
                if retries > self.max_retries:
                    raise e

                # Back off exponentially on rate-limit (HTTP 429) errors;
                # re-raise anything else immediately.
                if "429" in str(e):
                    wait_time = self.retry_delay * (2 ** (retries - 1))
                    print(f"\nRate limited; waiting {wait_time} seconds before retry ({retries}/{self.max_retries})...")
                    time.sleep(wait_time)
                else:
                    raise e

    def evaluate_single(self, question: Dict) -> Dict:
        """Evaluate a single question; returns a result dict, or None on error."""
        prompt = self.generate_prompt(question)

        try:
            if self.dry_run:
                prediction = f"[DRY RUN] mock answer for question {question['id']}"
            else:
                prediction = self.call_api_with_retry(prompt)
                # Throttle between real API calls.
                time.sleep(self.delay)

            is_correct = self.evaluate_answer(prediction, question['content']['answer'])

            return {
                'id': question['id'],
                'type': question['type'],
                'difficulty': question['difficulty'],
                'metadata': question['metadata'],
                'prompt': prompt,
                'prediction': prediction,
                'ground_truth': question['content']['answer'],
                'is_correct': is_correct
            }

        except Exception as e:
            print(f"Error evaluating question {question['id']}: {str(e)}")
            return None

    def evaluate_all(self, questions: List[Dict]):
        """Evaluate all questions, printing per-question details and a running accuracy."""
        correct = 0

        pbar = tqdm(questions, desc="Evaluating")

        for question in pbar:
            result = self.evaluate_single(question)
            if result:
                # Index the result under every grouping used by the report.
                self.results['all'].append(result)
                self.results[result['type']].append(result)
                self.results[result['difficulty']].append(result)
                if result['metadata']['dynasty']:
                    self.results[result['metadata']['dynasty']].append(result)

                if result['is_correct']:
                    correct += 1

                accuracy = correct / len(self.results['all']) * 100
                pbar.set_description(
                    f"Accuracy: {accuracy:.2f}% ({correct}/{len(self.results['all'])})"
                )

                print(f"\nQuestion {result['id']} ({result['type']}, {result['difficulty']}):")
                print(f"Prompt: {result['prompt']}")
                print(f"Prediction: {result['prediction']}")
                print(f"Answer: {result['ground_truth']}")
                print(f"Result: {'✓' if result['is_correct'] else '✗'}\n")

                print(f"Running overall accuracy: {accuracy:.2f}%")
                print("-" * 80)

    def generate_report(self) -> Dict:
        """Build the evaluation report with overall and per-category metrics."""
        report = {
            'overall': self._calculate_metrics(self.results['all']),
            'by_type': {},
            'by_difficulty': {},
            'by_dynasty': {}
        }

        for q_type in ['couplet', 'hint_words', 'find_poetry', 'blank_filling', 'first_last_words']:
            if self.results[q_type]:
                report['by_type'][q_type] = self._calculate_metrics(self.results[q_type])

        for difficulty in ['easy', 'medium', 'hard']:
            if self.results[difficulty]:
                report['by_difficulty'][difficulty] = self._calculate_metrics(self.results[difficulty])

        for dynasty in set(r['metadata']['dynasty'] for r in self.results['all'] if r['metadata']['dynasty']):
            report['by_dynasty'][dynasty] = self._calculate_metrics(self.results[dynasty])

        return report

    def _calculate_metrics(self, results: List[Dict]) -> Dict:
        """Compute total, correct count, and accuracy for a list of results."""
        total = len(results)
        correct = sum(1 for r in results if r['is_correct'])
        return {
            'total': total,
            'correct': correct,
            'accuracy': correct / total if total > 0 else 0
        }

    def save_results(self, output_dir: str):
        """Save per-question results, raw model I/O, and the summary report."""
        os.makedirs(output_dir, exist_ok=True)

        # Full per-question results, one JSON object per line.
        with open(os.path.join(output_dir, 'detailed_results.jsonl'), 'w', encoding='utf-8') as f:
            for result in self.results['all']:
                f.write(json.dumps(result, ensure_ascii=False) + '\n')

        # Raw prompt/completion pairs for manual inspection.
        with open(os.path.join(output_dir, 'raw_io.jsonl'), 'w', encoding='utf-8') as f:
            for result in self.results['all']:
                raw_io = {
                    'id': result['id'],
                    'type': result['type'],
                    'prompt': result['prompt'],
                    'completion': result['prediction'],
                    'ground_truth': result['ground_truth']
                }
                f.write(json.dumps(raw_io, ensure_ascii=False) + '\n')

        # Aggregated metrics.
        report = self.generate_report()
        with open(os.path.join(output_dir, 'evaluation_report.json'), 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evaluate a large language model on classical Chinese poetry')

    parser.add_argument('--api-key',
                        type=str,
                        required=True,
                        help='API key')

    parser.add_argument('--provider',
                        type=str,
                        choices=['openai', 'google'],
                        default='openai',
                        help='API provider (openai or google)')

    parser.add_argument('--model',
                        type=str,
                        default='gpt-3.5-turbo',
                        help='Name of the model to evaluate (default: gpt-3.5-turbo)')

    parser.add_argument('--api-base',
                        type=str,
                        help='API base URL (e.g. https://api.openai.com/v1)')

    parser.add_argument('--output-dir',
                        type=str,
                        default='evaluation_results',
                        help='Directory for evaluation results (default: evaluation_results)')

    parser.add_argument('--benchmark-file',
                        type=str,
                        default='poetry_benchmark.jsonl',
                        help='Path to the benchmark dataset (default: poetry_benchmark.jsonl)')

    parser.add_argument('--dry-run',
                        action='store_true',
                        help='Demo mode: do not actually call the API')

    parser.add_argument('--delay',
                        type=float,
                        default=0.5,
                        help='Delay between API calls in seconds (default: 0.5)')

    parser.add_argument('--max-retries',
                        type=int,
                        default=5,
                        help='Maximum number of API retries (default: 5)')

    parser.add_argument('--retry-delay',
                        type=float,
                        default=10,
                        help='Base retry wait time in seconds (default: 10)')

    args = parser.parse_args()

    # The openai>=1.0 client reads the module-level base_url set here
    # (this replaces the 0.x openai.api_base setting).
    if not args.dry_run and args.provider == "openai" and args.api_base:
        openai.base_url = args.api_base

    evaluator = PoetryEvaluator(
        api_key=args.api_key,
        provider=args.provider,
        model=args.model,
        dry_run=args.dry_run,
        delay=args.delay,
        max_retries=args.max_retries,
        retry_delay=args.retry_delay
    )

    questions = evaluator.load_benchmark(args.benchmark_file)

    evaluator.evaluate_all(questions)

    report = evaluator.generate_report()
    print(json.dumps(report, ensure_ascii=False, indent=2))

    evaluator.save_results(args.output_dir)
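    # Example invocation (the script filename is an assumption; substitute your own key and paths):
    #   python poetry_evaluator.py --api-key YOUR_KEY --provider openai --model gpt-3.5-turbo \
    #       --benchmark-file poetry_benchmark.jsonl --output-dir evaluation_results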