import os
import argparse
import json
import numpy as np
from tqdm import tqdm
import nltk
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from rouge import Rouge
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import re
from textstat import flesch_reading_ease
from datasets import load_dataset
import openai
from datetime import datetime

nltk.download('punkt', quiet=True)
nltk.download('averaged_perceptron_tagger', quiet=True)


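# Lowercase and word-tokenize text so the n-gram metrics below compare
# candidate and reference READMEs on the same token stream.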
def preprocess(text):
    return nltk.word_tokenize(text.lower())


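# Sentence-level BLEU of the generated README against the reference.
# Smoothing (method1) keeps the score non-zero when higher-order n-grams
# have no overlap, which is common for long free-form text.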
def calculate_bleu(reference, candidate):
    reference_tokens = preprocess(reference)
    candidate_tokens = preprocess(candidate)
    smoothie = SmoothingFunction().method1
    return sentence_bleu([reference_tokens], candidate_tokens, smoothing_function=smoothie)


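# ROUGE-1/2/L F1 scores via the `rouge` package; get_scores expects the
# hypothesis (candidate) first and the reference second.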
def calculate_rouge(reference, candidate):
    rouge = Rouge()
    scores = rouge.get_scores(candidate, reference)
    return {
        'rouge-1': scores[0]['rouge-1']['f'],
        'rouge-2': scores[0]['rouge-2']['f'],
        'rouge-l': scores[0]['rouge-l']['f']
    }


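# Bag-of-words similarity: fit a TF-IDF vectorizer on the two READMEs and
# take the cosine between their vectors.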
def calculate_cosine_similarity(reference, candidate):
    vectorizer = TfidfVectorizer()
    tfidf_matrix = vectorizer.fit_transform([reference, candidate])
    return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]


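# Split a markdown README into sections, starting a new section at every
# heading line (any line whose stripped form starts with '#').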
def extract_sections(readme):
    sections = []
    current_section = ""
    for line in readme.split('\n'):
        if line.strip().startswith('#'):
            if current_section:
                sections.append(current_section.strip())
            current_section = line + "\n"
        else:
            current_section += line + "\n"
    if current_section:
        sections.append(current_section.strip())
    return sections


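# Compare document structure: the difference in section counts plus the
# overlap of exact heading lines between reference and candidate.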
def calculate_structural_similarity(reference, candidate):
    ref_sections = extract_sections(reference)
    cand_sections = extract_sections(candidate)

    section_diff = abs(len(ref_sections) - len(cand_sections))

    ref_titles = [s.split('\n')[0] for s in ref_sections]
    cand_titles = [s.split('\n')[0] for s in cand_sections]
    title_similarity = len(set(ref_titles) & set(cand_titles)) / max(len(ref_titles), len(cand_titles))

    return {
        'section_difference': section_diff,
        'title_similarity': title_similarity
    }


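# Coverage of sections a good README is expected to have, measured as the
# fraction of key section keywords found anywhere in the text.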
def information_retrieval_score(readme):
    key_sections = ['installation', 'usage', 'api', 'example', 'license']
    found_sections = sum(1 for section in key_sections if section in readme.lower())
    return found_sections / len(key_sections)


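# How well the README reflects the code: the fraction of function and class
# names defined in the repo content that are mentioned in the README.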
def code_readme_consistency(repo_content, readme):
    code_elements = set(re.findall(r'def\s+(\w+)', repo_content) +
                        re.findall(r'class\s+(\w+)', repo_content))

    mentioned_elements = sum(1 for element in code_elements if element in readme)

    return mentioned_elements / len(code_elements) if code_elements else 0


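# Flesch Reading Ease rescaled by 1/100 and clamped to [0, 1], since the raw
# score can fall below 0 or above 100 for extreme texts.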
def calculate_readability(text):
    score = flesch_reading_ease(text) / 100
    return max(0.0, min(1.0, score))


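# Combine all metrics into a single weighted score. The weights below sum to
# 1.0; structural similarity contributes through title overlap only, while
# the section-count difference is reported but not weighted.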
def evaluate_readme(reference_readme, generated_readme, repo_content):
    bleu_score = calculate_bleu(reference_readme, generated_readme)
    rouge_scores = calculate_rouge(reference_readme, generated_readme)
    cosine_sim = calculate_cosine_similarity(reference_readme, generated_readme)
    structural_sim = calculate_structural_similarity(reference_readme, generated_readme)
    info_retrieval = information_retrieval_score(generated_readme)
    code_consistency = code_readme_consistency(repo_content, generated_readme)
    readability = calculate_readability(generated_readme)

    weights = {
        'bleu': 0.1,
        'rouge-1': 0.1,
        'rouge-2': 0.1,
        'rouge-l': 0.1,
        'cosine_similarity': 0.1,
        'structural_similarity': 0.1,
        'information_retrieval': 0.15,
        'code_consistency': 0.15,
        'readability': 0.1
    }

    weighted_score = (
        weights['bleu'] * bleu_score +
        weights['rouge-1'] * rouge_scores['rouge-1'] +
        weights['rouge-2'] * rouge_scores['rouge-2'] +
        weights['rouge-l'] * rouge_scores['rouge-l'] +
        weights['cosine_similarity'] * cosine_sim +
        weights['structural_similarity'] * structural_sim['title_similarity'] +
        weights['information_retrieval'] * info_retrieval +
        weights['code_consistency'] * code_consistency +
        weights['readability'] * readability
    )

    return {
        'bleu': bleu_score,
        'rouge': rouge_scores,
        'cosine_similarity': cosine_sim,
        'structural_similarity': structural_sim,
        'information_retrieval': info_retrieval,
        'code_consistency': code_consistency,
        'readability': readability,
        'weighted_score': weighted_score
    }


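# Ask the model for a README in a single chat completion: the system prompt
# pins the expected sections, the user prompt carries the full repository
# content. Very large repos may exceed the model's context window; no
# truncation is attempted here.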
def generate_readme(repo_content, model, client):
    system_prompt = """You are an AI assistant tasked with creating a README.md file for a GitHub repository.
    Your response should contain ONLY the content of the README.md file, without any additional explanations or markdown code blocks.
    The README should include the following sections:
    1. Project Title
    2. Description
    3. Installation
    4. Usage
    5. Features
    6. Contributing
    7. License
    Ensure that your response is well-structured, informative, and directly usable as a README.md file."""

    user_prompt = f"Here is the content of the repository:\n\n{repo_content}\n\nBased on this content, please generate a README.md file."

    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]
    )

    return response.choices[0].message.content


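# Driver: generate a README for every repo in the evaluation split, score it
# against the reference README, then print averages and write a per-repo log.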
def main(args):
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("OPENAI_API_KEY environment variable is not set")

    client = openai.OpenAI(api_key=api_key, base_url=args.base_url) if args.base_url else openai.OpenAI(api_key=api_key)

    dataset = load_dataset("patched-codes/generate-readme-eval")

    results = []

    for item in tqdm(dataset['test'], desc="Processing repos"):
        try:
            generated_readme = generate_readme(item['repo_content'], args.model, client)
            eval_result = evaluate_readme(item['repo_readme'], generated_readme, item['repo_content'])

            eval_result['repo_name'] = item['repo_name']
            results.append(eval_result)
        except Exception as e:
            print(f"Error processing repo {item['repo_name']}: {e}")
            continue

    if not results:
        print("No repositories were processed successfully; nothing to report.")
        return

    average_scores = {
        'bleu': np.mean([r['bleu'] for r in results]),
        'rouge-1': np.mean([r['rouge']['rouge-1'] for r in results]),
        'rouge-2': np.mean([r['rouge']['rouge-2'] for r in results]),
        'rouge-l': np.mean([r['rouge']['rouge-l'] for r in results]),
        'cosine_similarity': np.mean([r['cosine_similarity'] for r in results]),
        'title_similarity': np.mean([r['structural_similarity']['title_similarity'] for r in results]),
        'information_retrieval': np.mean([r['information_retrieval'] for r in results]),
        'code_consistency': np.mean([r['code_consistency'] for r in results]),
        'readability': np.mean([r['readability'] for r in results]),
        'weighted_score': np.mean([r['weighted_score'] for r in results])
    }

    print("\nEvaluation Results:")
    for metric, score in average_scores.items():
        print(f"{metric}: {score:.4f}")

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Model identifiers can contain '/', which would break the log path.
    safe_model_name = args.model.replace('/', '_')
    log_filename = f"{safe_model_name}_results_{timestamp}.log"

    with open(log_filename, 'w') as log_file:
        log_file.write(f"Evaluation Results for model: {args.model}\n")
        log_file.write(f"Timestamp: {timestamp}\n\n")
        log_file.write("Average Scores:\n")
        for metric, score in average_scores.items():
            log_file.write(f"{metric}: {score:.4f}\n")

        log_file.write("\nDetailed Results:\n")
        for result in results:
            log_file.write(f"\nRepository: {result['repo_name']}\n")
            log_file.write("Scores:\n")
            log_file.write(f"  BLEU: {result['bleu']:.4f}\n")
            log_file.write(f"  ROUGE-1: {result['rouge']['rouge-1']:.4f}\n")
            log_file.write(f"  ROUGE-2: {result['rouge']['rouge-2']:.4f}\n")
            log_file.write(f"  ROUGE-L: {result['rouge']['rouge-l']:.4f}\n")
            log_file.write(f"  Cosine Similarity: {result['cosine_similarity']:.4f}\n")
            log_file.write(f"  Title Similarity: {result['structural_similarity']['title_similarity']:.4f}\n")
            log_file.write(f"  Information Retrieval: {result['information_retrieval']:.4f}\n")
            log_file.write(f"  Code Consistency: {result['code_consistency']:.4f}\n")
            log_file.write(f"  Readability: {result['readability']:.4f}\n")
            log_file.write(f"  Weighted Score: {result['weighted_score']:.4f}\n")

    print(f"\nResults saved to {log_filename}")


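# Example invocation (assuming this file is saved as evaluate_readme.py; the
# model names below are placeholders for whatever your endpoint serves):
#   export OPENAI_API_KEY=...
#   python evaluate_readme.py gpt-4o-mini
#   python evaluate_readme.py local-model --base_url http://localhost:8000/v1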
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate and evaluate README files using OpenAI API")
    parser.add_argument("model", help="OpenAI model to use")
    parser.add_argument("--base_url", help="Optional base URL for OpenAI API", default=None)
    args = parser.parse_args()

    main(args)