Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -23,6 +23,7 @@ def get_datasets():
|
|
| 23 |
|
| 24 |
all_datasets = get_datasets()
|
| 25 |
|
|
|
|
| 26 |
def get_split(dataset_name):
|
| 27 |
if dataset_name == "Communication Networks: unseen questions":
|
| 28 |
split = load_dataset("Short-Answer-Feedback/saf_communication_networks_english", split="test_unseen_questions")
|
|
@@ -37,6 +38,7 @@ def get_split(dataset_name):
|
|
| 37 |
if dataset_name == "Legal Domain: unseen answers":
|
| 38 |
split = load_dataset("Short-Answer-Feedback/saf_legal_domain_german", split="test_unseen_answers")
|
| 39 |
return split
|
|
|
|
| 40 |
|
| 41 |
def get_model(datasetname):
|
| 42 |
if datasetname == "Communication Networks: unseen questions" or datasetname == "Communication Networks: unseen answers":
|
|
@@ -47,6 +49,7 @@ def get_model(datasetname):
|
|
| 47 |
model = "Short-Answer-Feedback/mbart-finetuned-saf-legal-domain"
|
| 48 |
return model
|
| 49 |
|
|
|
|
| 50 |
def get_tokenizer(datasetname):
|
| 51 |
if datasetname == "Communication Networks: unseen questions" or datasetname == "Communication Networks: unseen answers":
|
| 52 |
tokenizer = "Short-Answer-Feedback/bart-finetuned-saf-communication-networks"
|
|
@@ -234,6 +237,102 @@ def load_data():
|
|
| 234 |
|
| 235 |
df = pd.concat([df, new_row])
|
| 236 |
return df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 237 |
|
| 238 |
dataframe = load_data()
|
| 239 |
|
|
|
|
| 23 |
|
| 24 |
all_datasets = get_datasets()
|
| 25 |
|
| 26 |
+
'''
|
| 27 |
def get_split(dataset_name):
|
| 28 |
if dataset_name == "Communication Networks: unseen questions":
|
| 29 |
split = load_dataset("Short-Answer-Feedback/saf_communication_networks_english", split="test_unseen_questions")
|
|
|
|
| 38 |
if dataset_name == "Legal Domain: unseen answers":
|
| 39 |
split = load_dataset("Short-Answer-Feedback/saf_legal_domain_german", split="test_unseen_answers")
|
| 40 |
return split
|
| 41 |
+
'''
|
| 42 |
|
| 43 |
def get_model(datasetname):
|
| 44 |
if datasetname == "Communication Networks: unseen questions" or datasetname == "Communication Networks: unseen answers":
|
|
|
|
| 49 |
model = "Short-Answer-Feedback/mbart-finetuned-saf-legal-domain"
|
| 50 |
return model
|
| 51 |
|
| 52 |
+
'''
|
| 53 |
def get_tokenizer(datasetname):
|
| 54 |
if datasetname == "Communication Networks: unseen questions" or datasetname == "Communication Networks: unseen answers":
|
| 55 |
tokenizer = "Short-Answer-Feedback/bart-finetuned-saf-communication-networks"
|
|
|
|
| 237 |
|
| 238 |
df = pd.concat([df, new_row])
|
| 239 |
return df
|
| 240 |
+
'''
|
| 241 |
+
|
| 242 |
+
# Evaluation scores per dataset split, reported as percentages, in column
# order: SacreBLEU, ROUGE-2, METEOR, BERTScore, Accuracy, Weighted F1, Macro F1.
_DATASET_SCORES = {
    "Communication Networks: unseen questions": (2.4, 20.1, 28.5, 36.6, 51.6, 41.0, 27.9),
    "Communication Networks: unseen answers": (36.0, 49.1, 60.8, 69.5, 76.0, 73.0, 53.4),
    "Micro Job: unseen questions": (0.3, 0.5, 33.8, 31.3, 48.7, 46.5, 40.6),
    "Micro Job: unseen answers": (39.5, 29.8, 63.3, 63.1, 80.1, 80.3, 80.7),
    "Legal Domain: unseen questions": (3.2, 5.0, 20.0, 14.8, 60.7, 55.3, 55.4),
    "Legal Domain: unseen answers": (42.8, 43.7, 58.2, 57.5, 81.0, 80.1, 74.6),
}


def get_rows(datasetname):
    """Return a one-row DataFrame of evaluation metrics for ``datasetname``.

    Parameters
    ----------
    datasetname : str
        One of the dataset-split labels listed in ``_DATASET_SCORES``
        (e.g. ``"Legal Domain: unseen answers"``).

    Returns
    -------
    pandas.DataFrame
        A single row holding the model identifier (via ``get_model``), the
        dataset label, and the seven metric columns.

    Raises
    ------
    ValueError
        If ``datasetname`` is not a known dataset split.  (The original
        chain of ``if`` blocks left ``row`` unbound in that case and raised
        an opaque ``NameError``.)
    """
    try:
        sacrebleu, rouge_2, meteor, bertscore, accuracy, weighted_f1, macro_f1 = _DATASET_SCORES[datasetname]
    except KeyError:
        raise ValueError(f"Unknown dataset split: {datasetname!r}") from None
    return pd.DataFrame(
        {
            'Model': get_model(datasetname),
            'Dataset': datasetname,
            'SacreBLEU': [sacrebleu],
            'ROUGE-2': [rouge_2],
            'METEOR': [meteor],
            'BERTScore': [bertscore],
            'Accuracy': [accuracy],
            'Weighted F1': [weighted_f1],
            # Bug fix: the original wrote 'Macro F1: [53.4]' (misplaced quote)
            # in the "Communication Networks: unseen answers" branch, which
            # made the dict literal a syntax error.
            'Macro F1': [macro_f1],
        }
    )
|
| 329 |
+
|
| 330 |
+
def load_data():
    """Assemble the full leaderboard table, one row per dataset split.

    Iterates over every name in the module-level ``all_datasets`` list,
    fetches its metrics row via ``get_rows``, and concatenates the rows
    onto an initially empty frame so the column order is fixed up front.
    """
    metric_columns = [
        'Model', 'Dataset', 'SacreBLEU', 'ROUGE-2', 'METEOR',
        'BERTScore', 'Accuracy', 'Weighted F1', 'Macro F1',
    ]
    table = pd.DataFrame(columns=metric_columns)
    for dataset_name in all_datasets:
        table = pd.concat([table, get_rows(dataset_name)])
    return table
|
| 336 |
|
| 337 |
# Materialize the leaderboard table once at import time; the UI reads
# from this module-level DataFrame.
dataframe = load_data()
|
| 338 |
|