File size: 5,541 Bytes
085a012
 
 
 
 
 
 
 
0db3899
 
085a012
 
 
 
 
7904c2d
085a012
 
 
7904c2d
085a012
 
 
 
0db3899
085a012
 
0db3899
085a012
 
 
 
0db3899
085a012
 
0db3899
085a012
 
 
0db3899
085a012
 
 
0db3899
 
 
085a012
0db3899
085a012
 
 
 
0db3899
 
 
085a012
0db3899
085a012
 
 
 
0db3899
 
 
085a012
0db3899
085a012
 
0db3899
 
 
085a012
0db3899
085a012
 
0db3899
 
 
 
 
 
 
 
085a012
7904c2d
085a012
 
 
 
 
0db3899
085a012
 
 
0db3899
 
 
 
 
 
085a012
 
 
0db3899
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
import re

def create_gradio_anchor_id(text: str, validation: bool) -> str:
    """
    Replicates the ID format created by gr.Markdown(header_links=True).

    The header text is lowercased, whitespace runs become hyphens, and any
    character that is not a word character or hyphen is stripped. Gradio
    de-duplicates repeated anchors by appending a numeric suffix, which is
    why the validation variant carries a trailing "-1".

    Example: "Paper Finder Validation" -> "h-paper-finder-validation-leaderboard"
             (or "h-paper-finder-validation-leaderboard-1" when validation=True)

    Args:
        text: The raw header text to slugify.
        validation: If True, return the de-duplicated ("-1") anchor variant
            used for the validation leaderboard section.

    Returns:
        The anchor ID string, prefixed with "h-".
    """
    text = text.lower()
    text = re.sub(r'\s+', '-', text)          # collapse whitespace to hyphens
    text = re.sub(r'[^\w-]', '', text)        # drop punctuation/symbols
    if validation:
        return f"h-{text}-leaderboard-1"
    return f"h-{text}-leaderboard"


TITLE = """<h1 align="left" id="space-title">OpenHands Index</h1>"""

INTRO_PARAGRAPH = """
<p>
    <strong>OpenHands Index</strong> provides an aggregated view of agent performance and efficiency across all benchmarks in all categories. We report:
</p>

<ul class="info-list">
    <li>
        <strong>Overall score:</strong> A macro-average of the five category-level average scores. Each category contributes equally, regardless of how many benchmarks it includes. This ensures fair comparisons across agents with different domain strengths.
    </li>
    <li>
        <strong>Overall cost:</strong> A macro-average of the agent's cost per problem across all categories, in USD. Each category contributes equally.
    </li>
</ul>

<p>
    This view is designed for quick comparison of general-purpose coding agents. For more details on how we calculate scores and cost, please see the <a href="/about" style="color: #0FCB8C; text-decoration: underline;">About</a> Page.
</p>
"""

SCATTER_DISCLAIMER = """
**Note:** Agents without cost data are displayed to the right of the vertical divider line.
"""

PARETO_DISCLAIMER = """
Agents names that are green are Pareto optimal, meaning they achieve the best performance for their cost. 
"""

BUG_FIXING_DESCRIPTION = """
The **Bug Fixing** category evaluates how well agents can diagnose and fix bugs in real-world codebases. This tests their ability to understand GitHub issues, navigate repositories, identify root causes, and implement correct fixes.
<br><br>
The scores shown below reflect performance aggregated across two distinct benchmarks: SWE-bench (text-based bug reports) and SWE-bench-multimodal (issues with visual context like screenshots or diagrams). 
<br><br>
For detailed results, use the links above to explore individual benchmarks.
<br>
"""

APP_CREATION_DESCRIPTION = """
The **App Creation** category in OpenHands Index evaluates an agent's ability to build complete applications from scratch based on natural language specifications. This tests whether agents can understand requirements, design architecture, write modular code, and create working applications.
<br><br>
This category currently includes Commit0, which challenges agents to implement complete features and applications by generating the initial commit for a project.
<br><br>
For detailed results, use the links above to explore individual benchmark pages.
<br>
"""

FRONTEND_DEVELOPMENT_DESCRIPTION = """
The **Frontend Development** category evaluates agents on their ability to build user interfaces and web applications. This tests skills in HTML, CSS, JavaScript frameworks, responsive design, and creating interactive user experiences.
<br><br>
This category includes Multi-SWE-bench, which challenges agents to work across multiple repositories and coordinate changes in complex web application architectures.
<br>
"""

TEST_GENERATION_DESCRIPTION = """
The **Test Generation** category evaluates agents on their ability to create comprehensive test suites for existing code. This tests their understanding of code behavior, edge cases, and the ability to write effective unit tests, integration tests, and end-to-end tests.
<br><br>
This category includes SWT-bench (Software Testing Benchmark), which challenges agents to generate high-quality test cases that achieve good coverage and catch real bugs.
<br>
"""

INFORMATION_GATHERING_DESCRIPTION = """
The **Information Gathering** category tests whether agents can effectively search for information, synthesize knowledge from multiple sources, and answer complex questions that require tool use and reasoning.
<br><br>
This category includes GAIA (General AI Assistant benchmark), which evaluates agents on real-world assistant tasks that require web search, file manipulation, and multi-step reasoning to gather and process information.
<br>
"""

SUBMISSION_CONFIRMATION = """
**Your agent has been submitted to OpenHands Index for evaluation.**
<br><br>
πŸ™ Thanks for contributing!
<br><br>
You'll receive a confirmation email from our team within 2 business days with next steps. We will reach out to you directly if further information is needed.
<br><br>
We appreciate your support in advancing AI coding agents.
"""

# External URLs for benchmark descriptions. These are passed to
# external_link() below to render outbound links on the benchmark pages.
SWE_BENCH_URL = "https://www.swebench.com/"
SWE_BENCH_MULTIMODAL_URL = "https://www.swebench.com/multimodal"
MULTI_SWE_BENCH_URL = "https://github.com/multi-swe-bench"
SWT_BENCH_URL = "https://github.com/swt-bench/swt-bench"
COMMIT0_URL = "https://commit-0.github.io/"
GAIA_URL = "https://huggingface.co/gaia-benchmark"

def external_link(url, text, is_s2_url=False):
    """Build an HTML anchor for *url* that opens in a new tab.

    Semantic Scholar links are prefixed with the S2 logo image; every other
    link gets a trailing link emoji after the text.

    Args:
        url: Destination URL for the anchor.
        text: Visible link text.
        is_s2_url: Whether to render the Semantic Scholar logo variant.

    Returns:
        An HTML <a> string with class "benchmark-link".
    """
    if not is_s2_url:
        # Plain external link: text followed by a link icon.
        return f'<a href="{url}" target="_blank" class="benchmark-link">{text} πŸ”—</a>'
    s2_logo = (
        '<img src="https://www.semanticscholar.org/img/semantic_scholar_icon.svg"'
        ' alt="S2" class="s2-logo">'
    )
    return f'<a href="{url}" target="_blank" class="benchmark-link">{s2_logo} {text}</a>'