"""
Code Completion Benchmark for CodeReality-1T Dataset
Evaluates code completion models using Pass@k metrics
"""

import json
import os
import re
import random
from typing import Dict, List, Tuple, Optional
from collections import defaultdict

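# Each line of a .jsonl shard is assumed to decode to a repository record shaped
# roughly like the following (only the fields this benchmark actually reads are
# shown; the path and content values here are hypothetical):
#
#   {"files": [{"path": "src/app.py", "content": "def handler(event):\n    ..."}]}
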
def load_dataset_sample(data_dir: str, sample_size: int = 200) -> List[Dict]:
    """Load sample of repositories with code files."""
    print(f"🔍 Loading sample of {sample_size} repositories with code files...")

    repositories = []
    files = [f for f in os.listdir(data_dir) if f.endswith('.jsonl')]
    random.shuffle(files)

    for filename in files[:15]:
        file_path = os.path.join(data_dir, filename)
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                for line in f:
                    if len(repositories) >= sample_size:
                        break
                    try:
                        repo_data = json.loads(line)

                        if has_code_files(repo_data):
                            repositories.append(repo_data)
                    except json.JSONDecodeError:
                        continue
        except Exception:
            continue

        if len(repositories) >= sample_size:
            break

    print(f"✅ Loaded {len(repositories)} repositories with code files")
    return repositories

def has_code_files(repo: Dict) -> bool:
    """Check if repository contains code files."""
    code_extensions = {'.py', '.js', '.java', '.cpp', '.c', '.go', '.rs', '.ts'}

    files = repo.get('files', [])
    for file_obj in files:
        if isinstance(file_obj, dict):
            file_path = file_obj.get('path', '')
            if any(file_path.endswith(ext) for ext in code_extensions):
                return True
    return False

def extract_function_snippets(repo: Dict, language: str = 'python') -> List[Dict]:
    """Extract function definitions for completion tasks."""
    snippets = []

    # Function-definition regexes per supported language
    patterns = {
        'python': r'def\s+(\w+)\s*\([^)]*\):\s*',
        'javascript': r'function\s+(\w+)\s*\([^)]*\)\s*{',
        'java': r'(?:public|private|protected)?\s*(?:static)?\s*\w+\s+(\w+)\s*\([^)]*\)\s*{',
        'cpp': r'\w+\s+(\w+)\s*\([^)]*\)\s*{',
    }
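    # Example (illustrative): the 'python' pattern above matches a line such as
    # "def load_config(path, defaults=None):" and captures "load_config".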
    if language not in patterns:
        return snippets

    pattern = patterns[language]
    extension_map = {
        'python': '.py',
        'javascript': '.js',
        'java': '.java',
        'cpp': '.cpp'
    }

    target_ext = extension_map[language]

    files = repo.get('files', [])
    for file_obj in files:
        if isinstance(file_obj, dict):
            file_path = file_obj.get('path', '')
            content = file_obj.get('content', '')

            if file_path.endswith(target_ext) and content:
                matches = list(re.finditer(pattern, content, re.MULTILINE))

                for match in matches:
                    start_pos = match.start()
                    function_name = match.group(1)

                    # Keep up to five lines of context preceding the definition
                    lines_before = content[:start_pos].split('\n')
                    context_lines = lines_before[-5:] if len(lines_before) >= 5 else lines_before
                    context = '\n'.join(context_lines)

                    # Collect the function body following the definition line
                    remaining_content = content[start_pos:]
                    lines = remaining_content.split('\n')

                    function_lines = []
                    indent_level = None

                    for i, line in enumerate(lines):
                        if i == 0:
                            function_lines.append(line)
                            continue

                        # Infer the body indentation from the first non-empty line
                        if indent_level is None and line.strip():
                            indent_level = len(line) - len(line.lstrip())

                        # Stop once indentation drops below the body level
                        if line.strip() and indent_level is not None:
                            current_indent = len(line) - len(line.lstrip())
                            if current_indent < indent_level and not line.strip().startswith(('if', 'for', 'while', 'try', 'except', 'else', 'elif')):
                                break

                        function_lines.append(line)

                        # Cap the extracted body at roughly 20 lines
                        if len(function_lines) > 20:
                            break

                    function_body = '\n'.join(function_lines)

                    # Keep only functions with a non-trivial body
                    if len(function_lines) > 3:
                        snippets.append({
                            'function_name': function_name,
                            'context': context,
                            'prompt': function_lines[0],
                            'completion': '\n'.join(function_lines[1:]),
                            'file_path': file_path,
                            'language': language
                        })

    return snippets

def simple_code_completion_model(prompt: str, language: str) -> List[str]:
    """Simple rule-based code completion for demonstration."""
    completions = []

    # Canned candidate completions per language; a real model would condition on `prompt`
    templates = {
        'python': [
            " pass",
            " return None",
            " # TODO: implement this function\n pass",
            " result = None\n return result",
            " # Implementation needed\n raise NotImplementedError()"
        ],
        'javascript': [
            " return null;",
            " // TODO: implement\n return;",
            " throw new Error('Not implemented');",
            " var result = null;\n return result;",
            " console.log('Function called');\n return;"
        ],
        'java': [
            " return null;",
            " // TODO: implement this method\n return null;",
            " throw new UnsupportedOperationException();",
            " Object result = null;\n return result;",
            " System.out.println(\"Method called\");\n return null;"
        ]
    }

    if language in templates:
        # Return all candidate completions for the language
        return templates[language]
    else:
        return ["// TODO: implement"]

def evaluate_completion_quality(predicted: str, actual: str) -> float:
    """Simple heuristic evaluation of completion quality (score in [0, 1])."""
    pred_lines = [line.strip() for line in predicted.split('\n') if line.strip()]
    actual_lines = [line.strip() for line in actual.split('\n') if line.strip()]

    if not actual_lines:
        return 0.0

    score = 0.0

    # Check whether either side is essentially a stub / placeholder body
    empty_indicators = {'pass', 'todo', 'not implemented', 'null', 'return;', 'return null'}
    pred_empty = any(indicator in predicted.lower() for indicator in empty_indicators)
    actual_empty = any(indicator in actual.lower() for indicator in empty_indicators)

    if pred_empty and actual_empty:
        score += 0.8
    elif not pred_empty and not actual_empty:
        # Reward keyword overlap with the ground truth
        pred_keywords = set(re.findall(r'\b\w+\b', predicted.lower()))
        actual_keywords = set(re.findall(r'\b\w+\b', actual.lower()))

        if actual_keywords:
            keyword_overlap = len(pred_keywords & actual_keywords) / len(actual_keywords)
            score += keyword_overlap * 0.6

    # Reward a similar number of non-empty lines
    line_ratio = min(len(pred_lines), len(actual_lines)) / max(len(pred_lines), len(actual_lines))
    score += line_ratio * 0.4

    return min(score, 1.0)

def calculate_pass_at_k(completion_results: List[Tuple[List[str], str]], k: int = 1) -> float:
    """Calculate Pass@k metric."""
    if k <= 0:
        return 0.0

    total_passed = 0

    for completions, ground_truth in completion_results:
        # Consider only the top-k candidate completions
        top_k_completions = completions[:k]

        # A task passes if any top-k candidate scores above the quality threshold
        passed = False
        for completion in top_k_completions:
            quality_score = evaluate_completion_quality(completion, ground_truth)
            if quality_score > 0.5:
                passed = True
                break

        if passed:
            total_passed += 1

    return total_passed / len(completion_results) if completion_results else 0.0
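
# A sketch (illustrative, not used by this benchmark) of the unbiased Pass@k
# estimator from Chen et al. (2021): pass@k = 1 - C(n - c, k) / C(n, k), where
# n is the number of sampled completions per task and c the number that pass.
def unbiased_pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased Pass@k estimate for n samples with c passing (illustrative)."""
    if n - c < k:
        return 1.0
    estimate = 1.0
    for i in range(n - c + 1, n + 1):
        estimate *= 1.0 - k / i
    return estimate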


def run_completion_benchmark(repositories: List[Dict]) -> Dict:
    """Run code completion benchmark."""
    print("🧮 Running code completion benchmark...")

    results = {
        'total_repositories': len(repositories),
        'completion_tasks': [],
        'language_stats': defaultdict(int),
        'pass_at_1': 0.0,
        'pass_at_3': 0.0,
        'pass_at_5': 0.0,
        'average_quality': 0.0
    }

    completion_results = []
    quality_scores = []

    # Build completion tasks (at most two snippets per repository and language)
    for repo in repositories:
        for language in ['python', 'javascript', 'java']:
            snippets = extract_function_snippets(repo, language)

            for snippet in snippets[:2]:
                results['language_stats'][language] += 1

                # Generate candidate completions for the function signature
                completions = simple_code_completion_model(snippet['prompt'], language)
                ground_truth = snippet['completion']

                completion_results.append((completions, ground_truth))

                # Track the quality of the top-ranked completion
                if completions:
                    quality = evaluate_completion_quality(completions[0], ground_truth)
                    quality_scores.append(quality)

                results['completion_tasks'].append({
                    'function_name': snippet['function_name'],
                    'language': language,
                    'prompt_length': len(snippet['prompt']),
                    'completion_length': len(ground_truth)
                })

    # Aggregate Pass@k metrics and average quality
    results['pass_at_1'] = calculate_pass_at_k(completion_results, 1)
    results['pass_at_3'] = calculate_pass_at_k(completion_results, 3)
    results['pass_at_5'] = calculate_pass_at_k(completion_results, 5)
    results['average_quality'] = sum(quality_scores) / len(quality_scores) if quality_scores else 0.0

    return results

def print_benchmark_results(results: Dict):
    """Print formatted benchmark results."""
    print("=" * 60)
    print("🎯 CODE COMPLETION BENCHMARK RESULTS")
    print("=" * 60)

    print(f"Total repositories: {results['total_repositories']}")
    print(f"Completion tasks: {len(results['completion_tasks'])}")

    print("\n📊 Pass@k Metrics:")
    print(f"  Pass@1: {results['pass_at_1']:.3f}")
    print(f"  Pass@3: {results['pass_at_3']:.3f}")
    print(f"  Pass@5: {results['pass_at_5']:.3f}")
    print(f"  Average Quality: {results['average_quality']:.3f}")

    print("\n🔤 Language Distribution:")
    for language, count in sorted(results['language_stats'].items(), key=lambda x: x[1], reverse=True):
        print(f"  {language}: {count} functions")

    print("\n💡 Insights:")
    print("- This is a simplified demonstration benchmark")
    print("- Real evaluation requires more sophisticated code execution")
    print("- CodeReality-1T provides diverse, noisy code for robust testing")
    print("- Consider functional correctness testing for production models")

def main():
    """Run code completion benchmark."""
    print("🚀 CodeReality-1T Code Completion Benchmark")
    print("=" * 60)

    # Configuration: update data_dir to point at your local copy of the dataset
    data_dir = "/mnt/z/CodeReality_Final/unified_dataset"
    sample_size = 100

    if not os.path.exists(data_dir):
        print(f"❌ Dataset directory not found: {data_dir}")
        print("Please update the data_dir path to point to your CodeReality-1T dataset")
        return

    # Load a sample of repositories that contain code files
    repositories = load_dataset_sample(data_dir, sample_size)

    if not repositories:
        print("❌ No repositories loaded. Check dataset path.")
        return

    # Run the benchmark
    results = run_completion_benchmark(repositories)

    # Display results
    print_benchmark_results(results)

    # Save results (convert the defaultdict so it serializes cleanly)
    output_file = "code_completion_results.json"
    with open(output_file, 'w') as f:
        results_json = {
            'total_repositories': results['total_repositories'],
            'completion_tasks': results['completion_tasks'],
            'language_stats': dict(results['language_stats']),
            'pass_at_1': results['pass_at_1'],
            'pass_at_3': results['pass_at_3'],
            'pass_at_5': results['pass_at_5'],
            'average_quality': results['average_quality']
        }
        json.dump(results_json, f, indent=2)

    print(f"\n💾 Results saved to: {output_file}")


if __name__ == "__main__":
    main()