Amber Tanaka committed
Commit e31d809 · unverified · 1 parent: 85744c7

update links (#87)

Files changed (3)
  1. content.py +6 -0
  2. submission.py +1 -1
  3. ui_components.py +6 -3
content.py CHANGED
@@ -825,4 +825,10 @@ html {
   height: 18px;
   margin-right: 2px;
 }
+h3 .header-link-icon {
+  font-size: 12px;
+  vertical-align: text-top;
+  margin-left: 6px;
+  text-decoration: none;
+}
 """
submission.py CHANGED
@@ -337,7 +337,7 @@ heading_html = """
   <h2>🚀 Submit an agent for evaluation</h2>
   <p>Submit your agent to AstaBench for evaluation on real-world scientific tasks. Once submitted, your run will be reviewed by our team. If there are any issues, we’ll reach out within 5–7 business days. We’re working toward full automation, but in the meantime, human review helps ensure quality and trust.</p>
   <h3>How to run an evaluation</h3>
-  <p>Please follow the steps in our <a href="https://github.com/allenai/asta-bench" target="_blank">README</a>. You’ll upload your run file at the end of this form.</p>
+  <p>Please follow the steps in our <a href="https://github.com/allenai/asta-bench?tab=readme-ov-file#usage" target="_blank">README</a>. You’ll upload your run file at the end of this form.</p>
   """
 
   # --- Submission Accordion ---
ui_components.py CHANGED
@@ -644,11 +644,14 @@ def create_benchmark_details_display(
     gr.Markdown("---")
     # 2. Loop through each benchmark and create its UI components
     for benchmark_name in benchmark_names:
-        gr.HTML(f'''
-            <h3 class="benchmark-title" id="{create_gradio_anchor_id(benchmark_name, validation)}">{benchmark_name} Leaderboard</h3>
+        anchor_id = create_gradio_anchor_id(benchmark_name, validation)
+        gr.HTML(
+            f"""
+            <h3 class="benchmark-title" id="{anchor_id}">{benchmark_name} Leaderboard <a href="#{anchor_id}" class="header-link-icon">🔗</a></h3>
             <div class="benchmark-description">{get_benchmark_description(benchmark_name, validation)}</div>
             <button onclick="scroll_to_element('page-content-wrapper')" class="primary-link-button">Return to the aggregate {category_name} leaderboard</button>
-        ''')
+            """
+        )
 
     # 3. Prepare the data for this specific benchmark's table and plot
     benchmark_score_col = f"{benchmark_name} Score"
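
Note: the change above relies on a create_gradio_anchor_id helper already defined in ui_components.py. As a hypothetical sketch only (not the actual implementation in this commit), such a helper might slugify the benchmark name along these lines:

import re

def create_gradio_anchor_id(benchmark_name: str, validation: bool) -> str:
    # Hypothetical sketch; the real helper in ui_components.py may differ.
    # Lower-case the name, collapse non-alphanumeric runs into hyphens,
    # and suffix with the split so validation and test anchors stay distinct.
    slug = re.sub(r"[^a-z0-9]+", "-", benchmark_name.lower()).strip("-")
    return f"{slug}-{'validation' if validation else 'test'}"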