diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..17f7aae34 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +assets/PromptSource[[:space:]]ACL[[:space:]]Demo[[:space:]]Figure.png filter=lfs diff=lfs merge=lfs -text diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6285fb61b..c73fcaaff 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,7 +29,7 @@ You can always update the name later. If you want to cancel the prompt, select 1. **Write the prompt**. In the box labeled "Template," enter a Jinja expression. See the [getting started guide](#getting-started-using-jinja-to-write-prompts) and [cookbook](#jinja-cookbook) for details on how to write templates. -1. **Fill in metadata**. Fill in the metadata for the current prompt: reference, original task, choices in templates, and answer choices. +1. **Fill in metadata**. Fill in the metadata for the current prompt: reference, original task, choices in templates, metrics, languages, and answer choices. See [Metadata](#metadata) for more details about these fields. 1. **Save the prompt**. Hit the "Save" button. The output of the prompt applied to the current example will appear in the right sidebar. @@ -124,6 +124,7 @@ to generate a question for a given answer would not. the options for the possible outputs (regardless of whether `answer_choices` is used). * **Metrics.** Use the multiselect widget to select all metrics commonly used to evaluate this task. Choose “Other” if there is one that is not included in the list. +* **Languages.** Use the multiselect widget to select all languages used in the prompt. This is independent of what languages are used in the underlying dataset. For example, you could have an English prompt for a Spanish dataset. * **Answer Choices.** If the prompt has a small set of possible outputs (e.g., Yes/No, class labels, entailment judgements, etc.), then the prompt should define and use answer choices as follows. This allows evaluation to consider just the possible targets for diff --git a/README.md b/README.md index deb2c4a4d..a55550e11 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ PromptSource provides the tools to create, and share natural language prompts (s Question: Does this imply that "{{hypothesis}}"? Yes, no, or maybe? 
||| {{answer_choices[label]}} ``` -**You can browse through existing prompts on the [hosted version of PromptSource](https://bigscience.huggingface.co/promptsource).** +**You can browse through existing prompts on the [hosted version of PromptSource](https://huggingface.co/spaces/bigscience/promptsource).** ## Setup If you do not intend to modify prompts, you can simply run: diff --git a/assets/PromptSource ACL Demo Figure.png b/assets/PromptSource ACL Demo Figure.png index f9309d276..2298a7958 100644 Binary files a/assets/PromptSource ACL Demo Figure.png and b/assets/PromptSource ACL Demo Figure.png differ diff --git a/assets/promptsource_app.png b/assets/promptsource_app.png index 8aba73f47..ef8050838 100644 Binary files a/assets/promptsource_app.png and b/assets/promptsource_app.png differ diff --git a/promptsource/__init__.py b/promptsource/__init__.py index d4dacf2cb..21eaa37ed 100644 --- a/promptsource/__init__.py +++ b/promptsource/__init__.py @@ -1 +1,4 @@ -DEFAULT_PROMPTSOURCE_CACHE_HOME = "~/.cache/promptsource" +from pathlib import Path + + +DEFAULT_PROMPTSOURCE_CACHE_HOME = str(Path("~/.cache/promptsource").expanduser()) diff --git a/promptsource/app.py b/promptsource/app.py index ed1bc7076..8ca9d8e1d 100644 --- a/promptsource/app.py +++ b/promptsource/app.py @@ -1,20 +1,23 @@ import argparse import functools import multiprocessing +import os import textwrap +from hashlib import sha256 from multiprocessing import Manager, Pool import pandas as pd import plotly.express as px import streamlit as st from datasets import get_dataset_infos +from datasets.info import DatasetInfosDict from pygments import highlight from pygments.formatters import HtmlFormatter from pygments.lexers import DjangoLexer -from templates import INCLUDED_USERS +from promptsource import DEFAULT_PROMPTSOURCE_CACHE_HOME from promptsource.session import _get_state -from promptsource.templates import DatasetTemplates, Template, TemplateCollection +from promptsource.templates import INCLUDED_USERS, LANGUAGES, METRICS, DatasetTemplates, Template, TemplateCollection from promptsource.utils import ( get_dataset, get_dataset_confs, @@ -25,6 +28,9 @@ ) +DATASET_INFOS_CACHE_DIR = os.path.join(DEFAULT_PROMPTSOURCE_CACHE_HOME, "DATASET_INFOS") +os.makedirs(DATASET_INFOS_CACHE_DIR, exist_ok=True) + # Python 3.8 switched the default start method from fork to spawn. OS X also has # some issues related to fork, eee, e.g., https://github.com/bigscience-workshop/promptsource/issues/572 # so we make sure we always use spawn for consistency @@ -38,7 +44,28 @@ def get_infos(all_infos, d_name): :param all_infos: multiprocess-safe dictionary :param d_name: dataset name """ - all_infos[d_name] = get_dataset_infos(d_name) + d_name_bytes = d_name.encode("utf-8") + d_name_hash = sha256(d_name_bytes) + foldername = os.path.join(DATASET_INFOS_CACHE_DIR, d_name_hash.hexdigest()) + if os.path.isdir(foldername): + infos_dict = DatasetInfosDict.from_directory(foldername) + else: + infos = get_dataset_infos(d_name) + infos_dict = DatasetInfosDict(infos) + os.makedirs(foldername) + infos_dict.write_to_directory(foldername) + all_infos[d_name] = infos_dict + + +def format_language(tag): + """ + Formats a language tag for display in the UI. 
+ + For example, if the tag is "en", then the function returns "en (English)". + :param tag: language tag + :return: formatted language name + """ + return tag + " (" + LANGUAGES[tag] + ")" # add an argument for read-only @@ -181,11 +208,13 @@ def show_text(t, width=WIDTH, with_markdown=False): else: subset_infos = infos[subset_name] - split_sizes = {k: v.num_examples for k, v in subset_infos.splits.items()} + try: + split_sizes = {k: v.num_examples for k, v in subset_infos.splits.items()} + except Exception: + # Fixing bug in some community datasets. + # For simplicity, just filling `split_sizes` with nothing, so the displayed split sizes will be 0. + split_sizes = {} else: - # Zaid/coqa_expanded and Zaid/quac_expanded don't have dataset_infos.json - # so infos is an empty dic, and `infos[list(infos.keys())[0]]` raises an error - # For simplicity, just filling `split_sizes` with nothing, so the displayed split sizes will be 0. split_sizes = {} # Collect template counts, original task counts and names @@ -284,13 +313,18 @@ def show_text(t, width=WIDTH, with_markdown=False): except OSError as e: st.error( f"Some datasets are not handled automatically by `datasets` and require users to download the " - f"dataset manually. This applies to {dataset_key}{f'/{subset_name}' if subset_name is not None else ''}. " - f"\n\nPlease download the raw dataset to `~/.cache/promptsource/{dataset_key}{f'/{subset_name}' if subset_name is not None else ''}`. " + f"dataset manually. This may be the case for {dataset_key}{f'/{subset_name}' if subset_name is not None else ''}. " + f"\n\nIf so, please download the raw dataset to `~/.cache/promptsource/{dataset_key}{f'/{subset_name}' if subset_name is not None else ''}`. " f"\n\nYou can choose another cache directory by overriding `PROMPTSOURCE_MANUAL_DATASET_DIR` environment " f"variable and downloading raw dataset to `$PROMPTSOURCE_MANUAL_DATASET_DIR/{dataset_key}{f'/{subset_name}' if subset_name is not None else ''}`" f"\n\nOriginal error:\n{str(e)}" ) st.stop() + except Exception as e: + st.error( + f"An error occurred while loading the dataset {dataset_key}{f'/{subset_name}' if subset_name is not None else ''}. 
" + f"\\n\nOriginal error:\n{str(e)}" + ) splits = list(dataset.keys()) index = 0 @@ -403,6 +437,11 @@ def show_text(t, width=WIDTH, with_markdown=False): st.text(template.metadata.choices_in_prompt) st.markdown("##### Metrics") st.text(", ".join(template.metadata.metrics) if template.metadata.metrics else None) + st.markdown("##### Prompt Languages") + if template.metadata.languages: + st.text(", ".join([format_language(tag) for tag in template.metadata.languages])) + else: + st.text(None) st.markdown("##### Answer Choices") if template.get_answer_choices_expr() is not None: show_jinja(template.get_answer_choices_expr()) @@ -539,35 +578,24 @@ def show_text(t, width=WIDTH, with_markdown=False): help="Prompt explicitly lists choices in the template for the output.", ) - # Metrics from here: - # https://github.com/google-research/text-to-text-transfer-transformer/blob/4b580f23968c2139be7fb1cd53b22c7a7f686cdf/t5/evaluation/metrics.py - metrics_choices = [ - "BLEU", - "ROUGE", - "Squad", - "Trivia QA", - "Accuracy", - "Pearson Correlation", - "Spearman Correlation", - "MultiRC", - "AUC", - "COQA F1", - "Edit Distance", - ] - # Add mean reciprocal rank - metrics_choices.append("Mean Reciprocal Rank") - # Add generic other - metrics_choices.append("Other") - # Sort alphabetically - metrics_choices = sorted(metrics_choices) state.metadata.metrics = st.multiselect( "Metrics", - metrics_choices, + sorted(METRICS), default=template.metadata.metrics, help="Select all metrics that are commonly used (or should " "be used if a new task) to evaluate this prompt.", ) + state.metadata.languages = st.multiselect( + "Prompt Languages", + sorted(LANGUAGES.keys()), + default=template.metadata.languages, + format_func=format_language, + help="Select all languages used in this prompt. " + "This annotation is independent from the language(s) " + "of the dataset.", + ) + # Answer choices if template.get_answer_choices_expr() is not None: answer_choices = template.get_answer_choices_expr() diff --git a/promptsource/templates.py b/promptsource/templates.py index 3ece927fe..8a03407af 100644 --- a/promptsource/templates.py +++ b/promptsource/templates.py @@ -28,6 +28,212 @@ # filter_english_datasets (regardless of their metadata) INCLUDED_USERS = {"Zaid", "craffel"} +# These are the metrics with which templates can be tagged +METRICS = { + "BLEU", + "ROUGE", + "Squad", + "Trivia QA", + "Accuracy", + "Pearson Correlation", + "Spearman Correlation", + "MultiRC", + "AUC", + "COQA F1", + "Edit Distance", + "Mean Reciprocal Rank", + "Other", +} + +# These are the languages with which templates can be tagged. Keys are ISO 639-1 +# tags, which are the actual tags we use. Values are English names shown in the +# UI for convenience. 
+LANGUAGES = { + "ab": "Abkhazian", + "aa": "Afar", + "af": "Afrikaans", + "ak": "Akan", + "sq": "Albanian", + "am": "Amharic", + "ar": "Arabic", + "an": "Aragonese", + "hy": "Armenian", + "as": "Assamese", + "av": "Avaric", + "ae": "Avestan", + "ay": "Aymara", + "az": "Azerbaijani", + "bm": "Bambara", + "ba": "Bashkir", + "eu": "Basque", + "be": "Belarusian", + "bn": "Bengali", + "bi": "Bislama", + "bs": "Bosnian", + "br": "Breton", + "bg": "Bulgarian", + "my": "Burmese", + "ca": "Catalan, Valencian", + "ch": "Chamorro", + "ce": "Chechen", + "ny": "Chichewa, Chewa, Nyanja", + "zh": "Chinese", + "cu": "Church Slavic, Old Slavonic, Church Slavonic, Old Bulgarian, Old Church Slavonic", + "cv": "Chuvash", + "kw": "Cornish", + "co": "Corsican", + "cr": "Cree", + "hr": "Croatian", + "cs": "Czech", + "da": "Danish", + "dv": "Divehi, Dhivehi, Maldivian", + "nl": "Dutch, Flemish", + "dz": "Dzongkha", + "en": "English", + "eo": "Esperanto", + "et": "Estonian", + "ee": "Ewe", + "fo": "Faroese", + "fj": "Fijian", + "fi": "Finnish", + "fr": "French", + "fy": "Western Frisian", + "ff": "Fulah", + "gd": "Gaelic, Scottish Gaelic", + "gl": "Galician", + "lg": "Ganda", + "ka": "Georgian", + "de": "German", + "el": "Greek, Modern (1453–)", + "kl": "Kalaallisut, Greenlandic", + "gn": "Guarani", + "gu": "Gujarati", + "ht": "Haitian, Haitian Creole", + "ha": "Hausa", + "he": "Hebrew", + "hz": "Herero", + "hi": "Hindi", + "ho": "Hiri Motu", + "hu": "Hungarian", + "is": "Icelandic", + "io": "Ido", + "ig": "Igbo", + "id": "Indonesian", + "ia": "Interlingua (International Auxiliary Language Association)", + "ie": "Interlingue, Occidental", + "iu": "Inuktitut", + "ik": "Inupiaq", + "ga": "Irish", + "it": "Italian", + "ja": "Japanese", + "jv": "Javanese", + "kn": "Kannada", + "kr": "Kanuri", + "ks": "Kashmiri", + "kk": "Kazakh", + "km": "Central Khmer", + "ki": "Kikuyu, Gikuyu", + "rw": "Kinyarwanda", + "ky": "Kirghiz, Kyrgyz", + "kv": "Komi", + "kg": "Kongo", + "ko": "Korean", + "kj": "Kuanyama, Kwanyama", + "ku": "Kurdish", + "lo": "Lao", + "la": "Latin", + "lv": "Latvian", + "li": "Limburgan, Limburger, Limburgish", + "ln": "Lingala", + "lt": "Lithuanian", + "lu": "Luba-Katanga", + "lb": "Luxembourgish, Letzeburgesch", + "mk": "Macedonian", + "mg": "Malagasy", + "ms": "Malay", + "ml": "Malayalam", + "mt": "Maltese", + "gv": "Manx", + "mi": "Maori", + "mr": "Marathi", + "mh": "Marshallese", + "mn": "Mongolian", + "na": "Nauru", + "nv": "Navajo, Navaho", + "nd": "North Ndebele", + "nr": "South Ndebele", + "ng": "Ndonga", + "ne": "Nepali", + "no": "Norwegian", + "nb": "Norwegian Bokmål", + "nn": "Norwegian Nynorsk", + "ii": "Sichuan Yi, Nuosu", + "oc": "Occitan", + "oj": "Ojibwa", + "or": "Oriya", + "om": "Oromo", + "os": "Ossetian, Ossetic", + "pi": "Pali", + "ps": "Pashto, Pushto", + "fa": "Persian", + "pl": "Polish", + "pt": "Portuguese", + "pa": "Punjabi, Panjabi", + "qu": "Quechua", + "ro": "Romanian, Moldavian, Moldovan", + "rm": "Romansh", + "rn": "Rundi", + "ru": "Russian", + "se": "Northern Sami", + "sm": "Samoan", + "sg": "Sango", + "sa": "Sanskrit", + "sc": "Sardinian", + "sr": "Serbian", + "sn": "Shona", + "sd": "Sindhi", + "si": "Sinhala, Sinhalese", + "sk": "Slovak", + "sl": "Slovenian", + "so": "Somali", + "st": "Southern Sotho", + "es": "Spanish, Castilian", + "su": "Sundanese", + "sw": "Swahili", + "ss": "Swati", + "sv": "Swedish", + "tl": "Tagalog", + "ty": "Tahitian", + "tg": "Tajik", + "ta": "Tamil", + "tt": "Tatar", + "te": "Telugu", + "th": "Thai", + "bo": "Tibetan", + "ti": "Tigrinya", + "to": 
"Tonga (Tonga Islands)", + "ts": "Tsonga", + "tn": "Tswana", + "tr": "Turkish", + "tk": "Turkmen", + "tw": "Twi", + "ug": "Uighur, Uyghur", + "uk": "Ukrainian", + "ur": "Urdu", + "uz": "Uzbek", + "ve": "Venda", + "vi": "Vietnamese", + "vo": "Volapük", + "wa": "Walloon", + "cy": "Welsh", + "wo": "Wolof", + "xh": "Xhosa", + "yi": "Yiddish", + "yo": "Yoruba", + "za": "Zhuang, Chuang", + "zu": "Zulu", +} + def highlight(input): return "" + input + "" @@ -220,6 +426,7 @@ def __init__( original_task: Optional[bool] = None, choices_in_prompt: Optional[bool] = None, metrics: Optional[List[str]] = None, + languages: Optional[List[str]] = None, ): """ Initializes template metadata. @@ -233,10 +440,12 @@ def __init__( :param choices_in_prompt: If True, the answer choices are included in the templates such that models see those choices in the input. Only applicable to classification tasks. :param metrics: List of strings denoting metrics to use for evaluation + :param metrics: List of strings denoting languages used in the prompt (not the associated dataset!) """ self.original_task = original_task self.choices_in_prompt = choices_in_prompt self.metrics = metrics + self.languages = languages class TemplateCollection: @@ -496,6 +705,7 @@ def get_templates_data_frame(): "original_task": [], "choices_in_prompt": [], "metrics": [], + "languages": [], "answer_choices": [], "jinja": [], } @@ -514,6 +724,7 @@ def get_templates_data_frame(): data["original_task"].append(template.metadata.original_task) data["choices_in_prompt"].append(template.metadata.choices_in_prompt) data["metrics"].append(template.metadata.metrics) + data["languages"].append(template.metadata.languages) data["answer_choices"].append(template.get_answer_choices_expr()) data["jinja"].append(template.jinja) diff --git a/promptsource/templates/Zaid/coqa_expanded/templates.yaml b/promptsource/templates/Zaid/coqa_expanded/templates.yaml index 27c09adb2..24d95aec1 100644 --- a/promptsource/templates/Zaid/coqa_expanded/templates.yaml +++ b/promptsource/templates/Zaid/coqa_expanded/templates.yaml @@ -10,6 +10,8 @@ templates: \ %}\n{{answer[\"input_text\"]}}\n{% else %}\nunknown\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -24,6 +26,8 @@ templates: input_text\"]}}\n{% else %}\nunknown\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -38,6 +42,8 @@ templates: input_text\"]}}\n{% else %}\nunknown\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -52,6 +58,8 @@ templates: input_text\"]}}\n{% else %}\nunknown\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -82,6 +90,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -96,6 +106,8 @@ templates: ]}}\n{% else %}\nunknown\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -108,6 +120,8 @@ templates: \ -1 %}\n{{answer[\"input_text\"]}}\n{% else %}\nunknown\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/Zaid/quac_expanded/templates.yaml b/promptsource/templates/Zaid/quac_expanded/templates.yaml index c46ca7958..3d976e3a1 
100644 --- a/promptsource/templates/Zaid/quac_expanded/templates.yaml +++ b/promptsource/templates/Zaid/quac_expanded/templates.yaml @@ -8,6 +8,8 @@ templates: Q: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -19,6 +21,8 @@ templates: jinja: "{{context}}\n\nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -33,6 +37,8 @@ templates: \ {{context}}\n\nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -46,6 +52,8 @@ templates: \nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -59,6 +67,8 @@ templates: \nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -72,6 +82,8 @@ templates: \ \nA: ||| {{answer[\"texts\"][0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/acronym_identification/templates.yaml b/promptsource/templates/acronym_identification/templates.yaml index e64348b6b..9fae68d2c 100644 --- a/promptsource/templates/acronym_identification/templates.yaml +++ b/promptsource/templates/acronym_identification/templates.yaml @@ -41,6 +41,8 @@ templates: {% else %}\nUnclear\n{% endif %}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -62,6 +64,8 @@ templates: \ %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -121,6 +125,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -142,6 +148,8 @@ templates: \ answer_list.value|join(', ') }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -186,6 +194,8 @@ templates: \ %}\n{{item}} : {% if value!='' %}{{value}}{% else %}Unclear{% endif %}\n{%endfor%}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -228,6 +238,8 @@ templates: \ %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml index 4c8be69a4..930dbdb06 100644 --- a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml +++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml @@ -10,6 +10,8 @@ templates: Is "{{text}}" related to adverse drug effect (ADE)? ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -23,6 +25,8 @@ templates: \ be either \"Yes\" or \"No\".\n\n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -36,6 +40,8 @@ templates: \ not related to adverse drug effect (ADE). 
\n{% endif %}\n|||\n{{text}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml index 3e26e5a53..ee815d774 100644 --- a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml +++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml @@ -17,6 +17,8 @@ templates: {{drug}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +35,8 @@ templates: {{text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -55,6 +59,8 @@ templates: {{effect}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +86,8 @@ templates: {{drug}} and {{effect}}.' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +116,8 @@ templates: {{drug}} and {{effect}}.' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml index f26df11e9..d39c973a1 100644 --- a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml +++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml @@ -17,6 +17,8 @@ templates: {{drug}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +35,8 @@ templates: {{text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -62,6 +66,8 @@ templates: {{drug}} and {{dosage}}.' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -83,6 +89,8 @@ templates: {{dosage}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -97,6 +105,8 @@ templates: \ and {{dosage}}." 
metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml b/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml index 90b95617e..89b74eae8 100644 --- a/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml +++ b/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml @@ -11,6 +11,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -31,6 +33,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -56,6 +60,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -77,6 +83,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -103,6 +111,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/adversarial_qa/dbert/templates.yaml b/promptsource/templates/adversarial_qa/dbert/templates.yaml index 9b839e6c6..6a2a33629 100644 --- a/promptsource/templates/adversarial_qa/dbert/templates.yaml +++ b/promptsource/templates/adversarial_qa/dbert/templates.yaml @@ -11,6 +11,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -31,6 +33,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -56,6 +60,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -77,6 +83,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -103,6 +111,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/adversarial_qa/dbidaf/templates.yaml b/promptsource/templates/adversarial_qa/dbidaf/templates.yaml index b38a69903..047946f71 100644 --- a/promptsource/templates/adversarial_qa/dbidaf/templates.yaml +++ b/promptsource/templates/adversarial_qa/dbidaf/templates.yaml @@ -17,6 +17,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -43,6 +45,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -58,6 +62,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -78,6 +84,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -103,6 +111,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/adversarial_qa/droberta/templates.yaml b/promptsource/templates/adversarial_qa/droberta/templates.yaml index 1ab9886ba..ef3a2ef6f 100644 --- a/promptsource/templates/adversarial_qa/droberta/templates.yaml +++ 
b/promptsource/templates/adversarial_qa/droberta/templates.yaml @@ -11,6 +11,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -31,6 +33,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -56,6 +60,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -77,6 +83,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -103,6 +111,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/aeslc/templates.yaml b/promptsource/templates/aeslc/templates.yaml index 50865c9a8..b42a26380 100755 --- a/promptsource/templates/aeslc/templates.yaml +++ b/promptsource/templates/aeslc/templates.yaml @@ -12,6 +12,8 @@ templates: {{ subject_line }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - Other @@ -30,6 +32,8 @@ templates: {{ subject_line }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - Other @@ -48,6 +52,8 @@ templates: {{ subject_line }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - Other @@ -66,6 +72,8 @@ templates: {{ subject_line }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - Other @@ -84,6 +92,8 @@ templates: {{ subject_line }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - Other @@ -102,6 +112,8 @@ templates: {{ subject_line }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - Other @@ -120,6 +132,8 @@ templates: {{ subject_line }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - Other @@ -139,6 +153,8 @@ templates: {{ subject_line }} ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - Other diff --git a/promptsource/templates/ag_news/templates.yaml b/promptsource/templates/ag_news/templates.yaml index 9ef8a9043..cdfdf6acb 100644 --- a/promptsource/templates/ag_news/templates.yaml +++ b/promptsource/templates/ag_news/templates.yaml @@ -7,6 +7,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -19,6 +21,8 @@ templates: \ or science and technology\"}}?\n{{text}} \n||| \n{{answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -38,6 +42,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -51,6 +57,8 @@ templates: }}, or {{\"Science and Technology\"}}? ||| \n{{answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -63,6 +71,8 @@ templates: \ in? ||| \n{{answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -75,6 +85,8 @@ templates: \ business, or science and technology\"}}? 
||| \n{{answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -87,6 +99,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml b/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml index ee38b4c40..5c4950d91 100644 --- a/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml +++ b/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml @@ -25,6 +25,8 @@ templates: {% endfor %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -39,6 +41,8 @@ templates: \ %}\n- {{letter}}: {{t}}\n {% endfor %}|||{{answerKey}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -57,6 +61,8 @@ templates: {{answer_choices[choices["label"].index(answerKey)]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +83,8 @@ templates: {{answer_choices[choices["label"].index(answerKey)]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -98,6 +106,8 @@ templates: {{answer_choices[choices["label"].index(answerKey)]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -123,6 +133,8 @@ templates: {{answerKey}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml b/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml index 54e7f3780..b7c3a7b3c 100644 --- a/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml +++ b/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml @@ -21,6 +21,8 @@ templates: {{answerKey}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -39,6 +41,8 @@ templates: {{answer_choices[choices["label"].index(answerKey)]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +63,8 @@ templates: {{answer_choices[choices["label"].index(answerKey)]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +86,8 @@ templates: {{answer_choices[choices["label"].index(answerKey)]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +117,8 @@ templates: {% endfor %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -123,6 +133,8 @@ templates: \ %}\n- {{letter}}: {{t}}\n {% endfor %}|||{{answerKey}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/amazon_polarity/templates.yaml b/promptsource/templates/amazon_polarity/templates.yaml index a9cd72797..4558071be 100644 --- a/promptsource/templates/amazon_polarity/templates.yaml +++ b/promptsource/templates/amazon_polarity/templates.yaml @@ -12,6 +12,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +33,8 @@ templates: 
{{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -51,6 +55,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -68,6 +74,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -85,6 +93,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -106,6 +116,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +138,8 @@ templates: {{answer_choices[label]}} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -147,6 +161,8 @@ templates: {{answer_choices[label]}} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -167,6 +183,8 @@ templates: {{answer_choices[label]}} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/amazon_reviews_multi/en/templates.yaml b/promptsource/templates/amazon_reviews_multi/en/templates.yaml index 93b4fca6c..ede73254b 100644 --- a/promptsource/templates/amazon_reviews_multi/en/templates.yaml +++ b/promptsource/templates/amazon_reviews_multi/en/templates.yaml @@ -13,6 +13,8 @@ templates: {{review_title}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -32,6 +34,8 @@ templates: {{answer_choices[stars-1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -51,6 +55,8 @@ templates: {{answer_choices[stars-1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -70,6 +76,8 @@ templates: {{answer_choices[stars-1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -88,6 +96,8 @@ templates: {{product_category}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -102,12 +112,13 @@ templates: === - {{review_title}}. {{review_body}} - Product category: {{product_category}}||| + {{review_title}}. {{review_body}} Product category: {{product_category}}||| {{answer_choices[stars-1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -126,6 +137,8 @@ templates: {{product_category}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other diff --git a/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml b/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml index cf6cc0de6..a3bc39ee9 100644 --- a/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml +++ b/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml @@ -8,6 +8,8 @@ templates: \ \n|||\n{{review_headline}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -21,6 +23,8 @@ templates: about this product {{product_title}}. 
||| {{review_body}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -35,6 +39,8 @@ templates: n- ') }} \n(1 being lowest and 5 being highest)\n|||\n{{answer_choices[star_rating-1]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -48,6 +54,8 @@ templates: \ \n{{review_headline}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -62,6 +70,8 @@ templates: \ | join('\\n- ') }} \n|||\n{{answer_choices[star_rating-1]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/ambig_qa/light/templates.yaml b/promptsource/templates/ambig_qa/light/templates.yaml index c98368a24..6b5b5543f 100644 --- a/promptsource/templates/ambig_qa/light/templates.yaml +++ b/promptsource/templates/ambig_qa/light/templates.yaml @@ -18,6 +18,8 @@ templates: \ output the same question.\n|||\n{{selected_question}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - Edit Distance @@ -41,6 +43,8 @@ templates: \ | join(\"; \")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -62,6 +66,8 @@ templates: \ | join(\"; \")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -86,6 +92,8 @@ templates: \ %} {{answer_choices[0]}} {% else %} {{answer_choices[1]}} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -110,6 +118,8 @@ templates: {{selected_question}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/anli/templates.yaml b/promptsource/templates/anli/templates.yaml index b10a0eab6..ab6e2ee08 100644 --- a/promptsource/templates/anli/templates.yaml +++ b/promptsource/templates/anli/templates.yaml @@ -8,6 +8,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +52,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +67,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -71,6 +81,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -85,6 +97,8 @@ templates: {{"inconclusive"}}? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -97,6 +111,8 @@ templates: Yes, no, or maybe? 
||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +125,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,6 +140,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -134,6 +154,8 @@ templates: {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -146,6 +168,8 @@ templates: \ no, or maybe? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -159,6 +183,8 @@ templates: \ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -172,6 +198,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -184,6 +212,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/app_reviews/templates.yaml b/promptsource/templates/app_reviews/templates.yaml index 08cd8fdbb..d92249d11 100644 --- a/promptsource/templates/app_reviews/templates.yaml +++ b/promptsource/templates/app_reviews/templates.yaml @@ -13,6 +13,8 @@ templates: {{answer_choices[star-1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Spearman Correlation @@ -30,6 +32,8 @@ templates: {{review}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Accuracy - Spearman Correlation @@ -45,6 +49,8 @@ templates: {{answer_choices[star-1]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Spearman Correlation @@ -62,6 +68,8 @@ templates: {{star}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Spearman Correlation diff --git a/promptsource/templates/aqua_rat/raw/templates.yaml b/promptsource/templates/aqua_rat/raw/templates.yaml index 395c0535b..19484d09f 100644 --- a/promptsource/templates/aqua_rat/raw/templates.yaml +++ b/promptsource/templates/aqua_rat/raw/templates.yaml @@ -9,6 +9,8 @@ templates: \ is\n |||\n{{correct}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -45,6 +47,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -58,6 +62,8 @@ templates: {%endfor%}\n|||\n{{rationale}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - BLEU - ROUGE @@ -85,6 +91,8 @@ templates: {{correct}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -98,6 +106,8 @@ templates: {%endfor%}\n||| \n{{correct}}\n\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -111,6 +121,8 @@ templates: \ the above question:\n|||\n{{rationale}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - BLEU - ROUGE diff --git 
a/promptsource/templates/art/templates.yaml b/promptsource/templates/art/templates.yaml index 51fb519ff..067e13ddb 100755 --- a/promptsource/templates/art/templates.yaml +++ b/promptsource/templates/art/templates.yaml @@ -10,6 +10,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -39,6 +41,8 @@ templates: {{ answer_choices[label-1] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +71,8 @@ templates: {{ answer_choices[label-1] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -82,6 +88,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -116,6 +124,8 @@ templates: {{ answer_choices[label-1] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/asnq/templates.yaml b/promptsource/templates/asnq/templates.yaml index 386901418..29e9e8185 100644 --- a/promptsource/templates/asnq/templates.yaml +++ b/promptsource/templates/asnq/templates.yaml @@ -8,6 +8,8 @@ templates: \ the question? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -21,6 +23,8 @@ templates: {% endif %}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -46,6 +50,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -59,6 +65,8 @@ templates: ? ||| {{answer_choices[label]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -71,6 +79,8 @@ templates: ? ||| {{answer_choices[label]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -95,6 +105,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -121,6 +133,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -143,6 +157,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -156,6 +172,8 @@ templates: \ \ please answer yes or no. ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -169,6 +187,8 @@ templates: \ %}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -182,6 +202,8 @@ templates: following question : {{question}} ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/asset/ratings/templates.yaml b/promptsource/templates/asset/ratings/templates.yaml index 30169c5ad..248ff0055 100644 --- a/promptsource/templates/asset/ratings/templates.yaml +++ b/promptsource/templates/asset/ratings/templates.yaml @@ -12,6 +12,8 @@ templates: \n{{questions[aspect]}}. Please answer Yes or No. 
\n|||\n{{answer_choices[label]}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -37,6 +39,8 @@ templates: {{rating}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -87,6 +91,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -103,6 +109,8 @@ templates: \ {{statements[aspect]}} \n\n|||\n{{rating}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/asset/simplification/templates.yaml b/promptsource/templates/asset/simplification/templates.yaml index 91182e838..00cb18170 100644 --- a/promptsource/templates/asset/simplification/templates.yaml +++ b/promptsource/templates/asset/simplification/templates.yaml @@ -11,6 +11,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -40,6 +42,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -83,6 +87,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -125,6 +131,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -150,6 +158,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/banking77/templates.yaml b/promptsource/templates/banking77/templates.yaml index 36c1e7441..12fd8ae9f 100644 --- a/promptsource/templates/banking77/templates.yaml +++ b/promptsource/templates/banking77/templates.yaml @@ -33,6 +33,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -70,6 +72,8 @@ templates: ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -114,6 +118,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -148,6 +154,8 @@ templates: |||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -193,6 +201,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -230,6 +240,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -267,6 +279,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/billsum/templates.yaml b/promptsource/templates/billsum/templates.yaml index 149471183..1ca14aa92 100644 --- a/promptsource/templates/billsum/templates.yaml +++ b/promptsource/templates/billsum/templates.yaml @@ -12,6 +12,8 @@ templates: {{text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -25,6 +27,8 @@ templates: \ one sentence.\n|||\n{{title}}" metadata: 
!TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -43,6 +47,8 @@ templates: {{text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -61,6 +67,8 @@ templates: {{text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -79,6 +87,8 @@ templates: {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -95,6 +105,8 @@ templates: {{title}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -111,6 +123,8 @@ templates: {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -129,6 +143,8 @@ templates: {{title}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/bing_coronavirus_query_set/templates.yaml b/promptsource/templates/bing_coronavirus_query_set/templates.yaml index 3cb188848..0e0b45121 100644 --- a/promptsource/templates/bing_coronavirus_query_set/templates.yaml +++ b/promptsource/templates/bing_coronavirus_query_set/templates.yaml @@ -7,6 +7,8 @@ templates: \ In what country was it issued? \n{{Query}}\n|||\n{{Country}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -17,10 +19,11 @@ templates: id: 68f9c063-1907-4866-ab1b-756cc57e5695 jinja: "The user is searching for coronavirus results on Bing.com. Is the intent\ \ implicit or explicit? \n{{Query}}\n|||\n{% if IsImplicitIntent == \"True\"\ - \ %}\n{{answer_choices[0] }}\n{% else %}\n{{answer_choices[1] }}\n{% endif\ - \ %}" + \ %}\n{{answer_choices[0] }}\n{% else %}\n{{answer_choices[1] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -31,10 +34,12 @@ templates: id: 992d541f-9e0c-466d-b4c4-92e9e236f863 jinja: "This search query about coronavirus was issued in {{Country}} on {{Date}}.\ \ Is the intent implicit or explicit? \n{{Query}}\n|||\n{% if IsImplicitIntent\ - \ == \"True\" %}\n{{answer_choices[0] }}\n{% else %}\n{{answer_choices[1]\ - \ }}\n{% endif %}" + \ == \"True\" %}\n{{answer_choices[0] }}\n{% else %}\n{{answer_choices[1] }}\n\ + {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -44,10 +49,12 @@ templates: answer_choices: Yes ||| No id: df53652c-36dc-45fe-a015-d0781e32cd33 jinja: "Does this search engine query have an indirect relation to Covid-19? 
\n\ - {{Query}}\n|||\n{% if IsImplicitIntent == \"True\" %}\n{{answer_choices[0]\ - \ }}\n{% else %}\n{{answer_choices[1] }}\n{% endif %}" + {{Query}}\n|||\n{% if IsImplicitIntent == \"True\" %}\n{{answer_choices[0] }}\n\ + {% else %}\n{{answer_choices[1] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -61,6 +68,8 @@ templates: \ }}\n{% else %}\n{{answer_choices[1] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/biosses/templates.yaml b/promptsource/templates/biosses/templates.yaml index d5ec21158..cc5363a99 100644 --- a/promptsource/templates/biosses/templates.yaml +++ b/promptsource/templates/biosses/templates.yaml @@ -13,6 +13,8 @@ templates: {{(((5*score)|round)/5)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -26,6 +28,8 @@ templates: ||| {{(((5*score)|round)/5)}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -39,6 +43,8 @@ templates: \ the two sentences? |||\n\n{{(((5*score)|round)/5)}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -57,6 +63,8 @@ templates: {{(((5*score)|round)/5)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -75,6 +83,8 @@ templates: {{(((5*score)|round)/5)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -87,6 +97,8 @@ templates: \ the same information? 
|||\n\n{{answer_choices[0 if score < 2.5 else 1]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -105,6 +117,8 @@ templates: {{(((5*score)|round)/5)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -118,6 +132,8 @@ templates: 1]}} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -130,6 +146,8 @@ templates: if score < 2.5 else 1]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -145,6 +163,8 @@ templates: {{(((5*score)|round)/5)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -157,6 +177,8 @@ templates: ||| {{answer_choices[0 if score < 2.5 else 1]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/blbooksgenre/title_genre_classifiction/templates.yaml b/promptsource/templates/blbooksgenre/title_genre_classifiction/templates.yaml index 46da7f761..8c8ae9d3d 100644 --- a/promptsource/templates/blbooksgenre/title_genre_classifiction/templates.yaml +++ b/promptsource/templates/blbooksgenre/title_genre_classifiction/templates.yaml @@ -9,6 +9,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - AUC @@ -22,6 +24,8 @@ templates: |||\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - AUC @@ -35,6 +39,8 @@ templates: Answer: \n|||\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - AUC @@ -47,6 +53,8 @@ templates: jinja: The genre of the book "{{title}}" is ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - AUC diff --git a/promptsource/templates/blended_skill_talk/templates.yaml b/promptsource/templates/blended_skill_talk/templates.yaml index fcff2560c..187a600f6 100644 --- a/promptsource/templates/blended_skill_talk/templates.yaml +++ b/promptsource/templates/blended_skill_talk/templates.yaml @@ -10,6 +10,8 @@ templates: \nB: \n|||\n{{guided_messages[-1]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -29,6 +31,8 @@ templates: |||\nNo.\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -43,6 +47,8 @@ templates: \nPerson B: {{message_g}}\n{% endfor %} \n|||\n{{previous_utterance[0]}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/cbt/CN/templates.yaml b/promptsource/templates/cbt/CN/templates.yaml index bf019b6c8..dad740f2a 100644 --- a/promptsource/templates/cbt/CN/templates.yaml +++ b/promptsource/templates/cbt/CN/templates.yaml @@ -14,6 +14,8 @@ templates: {{ question.replace("XXXXX", answer) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -35,6 +37,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -44,10 +48,12 @@ templates: answer_choices: '{{options|join(''|||'')}}' id: 
556ee207-18c9-4c6c-860a-8ea09b93505c jinja: "{{sentences | join (' ')}}\n\nIn this following sentence: \n\"{{question}}\"\ - , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n{{answer_choices|join(\"\ - , \")}}\n|||\n{{ answer }}" + , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n\ + {{answer_choices|join(\", \")}}\n|||\n{{ answer }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -66,6 +72,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +94,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -106,6 +116,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +138,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/cbt/NE/templates.yaml b/promptsource/templates/cbt/NE/templates.yaml index d1f68a5fd..290d2c2b4 100644 --- a/promptsource/templates/cbt/NE/templates.yaml +++ b/promptsource/templates/cbt/NE/templates.yaml @@ -14,6 +14,8 @@ templates: {{ question.replace("XXXXX", answer) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -35,6 +37,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -44,10 +48,12 @@ templates: answer_choices: '{{options|join(''|||'')}}' id: 556ee207-18c9-4c6c-860a-8ea09b9350bb jinja: "{{sentences | join (' ')}}\n\nIn this following sentence: \n\"{{question}}\"\ - , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n{{answer_choices|join(\"\ - , \")}}\n|||\n{{ answer }}" + , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n\ + {{answer_choices|join(\", \")}}\n|||\n{{ answer }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -66,6 +72,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +94,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -106,6 +116,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +138,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/cbt/P/templates.yaml b/promptsource/templates/cbt/P/templates.yaml index 848257bd4..74543c35a 100644 --- a/promptsource/templates/cbt/P/templates.yaml +++ b/promptsource/templates/cbt/P/templates.yaml @@ -14,6 +14,8 @@ templates: {{ question.replace("XXXXX", answer) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -35,6 +37,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -44,10 +48,12 @@ templates: answer_choices: '{{options|join(''|||'')}}' id: 
556ee207-18c9-4c6c-860a-8ea09b93505a jinja: "{{sentences | join (' ')}}\n\nIn this following sentence: \n\"{{question}}\"\ - , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n{{answer_choices|join(\"\ - , \")}}\n|||\n{{ answer }}" + , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n\ + {{answer_choices|join(\", \")}}\n|||\n{{ answer }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -66,6 +72,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +94,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -106,6 +116,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +138,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/cbt/V/templates.yaml b/promptsource/templates/cbt/V/templates.yaml index 3507fe6cb..10a1c0af3 100644 --- a/promptsource/templates/cbt/V/templates.yaml +++ b/promptsource/templates/cbt/V/templates.yaml @@ -14,6 +14,8 @@ templates: {{ question.replace("XXXXX", answer) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -35,6 +37,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -44,10 +48,12 @@ templates: answer_choices: '{{options|join(''|||'')}}' id: 556ee207-18c9-4c6c-860a-8ea09b9350cc jinja: "{{sentences | join (' ')}}\n\nIn this following sentence: \n\"{{question}}\"\ - , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n{{answer_choices|join(\"\ - , \")}}\n|||\n{{ answer }}" + , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n\ + {{answer_choices|join(\", \")}}\n|||\n{{ answer }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -66,6 +72,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +94,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -106,6 +116,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +138,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/cbt/raw/templates.yaml b/promptsource/templates/cbt/raw/templates.yaml index e180f79dd..56ae14a2b 100644 --- a/promptsource/templates/cbt/raw/templates.yaml +++ b/promptsource/templates/cbt/raw/templates.yaml @@ -12,6 +12,8 @@ templates: {{title.split(''___'')[0]|replace(''_'','' '')}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -27,6 +29,8 @@ templates: {{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'','' '')}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -43,6 
+47,8 @@ templates: {{title.split(''___'')[0]|replace(''_'','' '')}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -61,6 +67,8 @@ templates: {{title.split(''___'')[0]|replace(''_'','' '')}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -79,6 +87,8 @@ templates: {{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'','' '')}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/cc_news/templates.yaml b/promptsource/templates/cc_news/templates.yaml index fd64cddbc..41f12fa3d 100755 --- a/promptsource/templates/cc_news/templates.yaml +++ b/promptsource/templates/cc_news/templates.yaml @@ -17,6 +17,8 @@ templates: {{text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -30,6 +32,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -49,6 +53,8 @@ templates: {{ title }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -74,6 +80,8 @@ templates: {{ text }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -87,6 +95,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -108,6 +118,8 @@ templates: {{ text }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -120,6 +132,8 @@ templates: jinja: "Choose a title for the text below: \n\n{{ text }}\n|||\n{{ title }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -139,6 +153,8 @@ templates: {{ description }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -152,6 +168,8 @@ templates: \ title }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -165,6 +183,8 @@ templates: \ }}\n|||\n{{ description }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -190,6 +210,8 @@ templates: {{ text }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -202,6 +224,8 @@ templates: jinja: "Give this text a title: \n\n{{ text }}\n|||\n{{ title }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -215,6 +239,8 @@ templates: {{ description }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -228,6 +254,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/circa/templates.yaml b/promptsource/templates/circa/templates.yaml index 75a28755b..e519f2894 100644 --- a/promptsource/templates/circa/templates.yaml +++ b/promptsource/templates/circa/templates.yaml @@ -12,6 +12,8 @@ templates: {{canquestion_X}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - BLEU @@ -34,6 +36,8 @@ templates: Y: {{answer_Y}} |||\n\n{{ answer_choices[goldstandard2]}}\n\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -48,6 +52,8 @@ templates: {{question_X}}' 
metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -65,6 +71,8 @@ templates: \ %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -84,6 +92,8 @@ templates: Y: {{answer_Y}} |||\n\n{{ answer_choices[goldstandard1]}}\n\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/climate_fever/templates.yaml b/promptsource/templates/climate_fever/templates.yaml index aaf858ae2..82faba1b8 100644 --- a/promptsource/templates/climate_fever/templates.yaml +++ b/promptsource/templates/climate_fever/templates.yaml @@ -17,6 +17,8 @@ templates: {{ answer_choices[evidences[0]["evidence_label"]] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -43,6 +45,8 @@ templates: {{ answer_choices[claim_label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -66,6 +70,8 @@ templates: {{ answer_choices[evidences[4]["evidence_label"]] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -81,6 +87,8 @@ templates: {{ answer_choices[evidences[3]["evidence_label"]] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -101,6 +109,8 @@ templates: {{ answer_choices[evidences[2]["evidence_label"]] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -119,6 +129,8 @@ templates: {{ answer_choices[evidences[1]["evidence_label"]] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -141,6 +153,8 @@ templates: {{ answer_choices[evidences[2]["evidence_label"]] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml b/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml index 02a6d5d19..a0f12d17c 100644 --- a/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml +++ b/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml @@ -12,6 +12,8 @@ templates: {{highlights}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -29,6 +31,8 @@ templates: {{highlights}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -47,6 +51,8 @@ templates: {{highlights}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -65,6 +71,8 @@ templates: {{highlights}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -83,6 +91,8 @@ templates: {{highlights}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -100,6 +110,8 @@ templates: {{article}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -112,6 +124,8 @@ templates: jinja: 'Sum the following article in brief: {{article}}|||{{highlights}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -130,6 +144,8 @@ templates: {{highlights}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en 
metrics: - BLEU - ROUGE @@ -148,6 +164,8 @@ templates: {{article}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/codah/codah/templates.yaml b/promptsource/templates/codah/codah/templates.yaml index 4366dc3f5..1629de032 100644 --- a/promptsource/templates/codah/codah/templates.yaml +++ b/promptsource/templates/codah/codah/templates.yaml @@ -24,6 +24,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +54,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +84,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +116,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +145,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -151,8 +161,7 @@ templates: Completion: {{ candidate_answers[correct_answer_idx] }} - Which of {{answer_choices | join(", ")}} - best describes the completed sentence? + Which of {{answer_choices | join(", ")}} best describes the completed sentence? ||| @@ -160,6 +169,8 @@ templates: {{answer_choices[question_category]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -184,6 +195,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/codah/fold_0/templates.yaml b/promptsource/templates/codah/fold_0/templates.yaml index 94540996b..ffe5d78dc 100644 --- a/promptsource/templates/codah/fold_0/templates.yaml +++ b/promptsource/templates/codah/fold_0/templates.yaml @@ -24,6 +24,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +54,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +84,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +116,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +145,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -160,6 +170,8 @@ templates: {{answer_choices[question_category]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -184,6 +196,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/codah/fold_1/templates.yaml b/promptsource/templates/codah/fold_1/templates.yaml index 92f6c609a..d5e2ce3f9 100644 --- a/promptsource/templates/codah/fold_1/templates.yaml +++ b/promptsource/templates/codah/fold_1/templates.yaml @@ 
-24,6 +24,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +54,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +84,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +116,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +145,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -160,6 +170,8 @@ templates: {{answer_choices[question_category]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -184,6 +196,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/codah/fold_2/templates.yaml b/promptsource/templates/codah/fold_2/templates.yaml index 688f81344..83a6d1856 100644 --- a/promptsource/templates/codah/fold_2/templates.yaml +++ b/promptsource/templates/codah/fold_2/templates.yaml @@ -24,6 +24,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +54,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +84,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +116,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +145,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -160,6 +170,8 @@ templates: {{answer_choices[question_category]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -184,8 +196,10 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true name: interrogative_instruction_before_sentence_and_choices - reference: '' \ No newline at end of file + reference: '' diff --git a/promptsource/templates/codah/fold_3/templates.yaml b/promptsource/templates/codah/fold_3/templates.yaml index 996b2ad9d..1be1555b1 100644 --- a/promptsource/templates/codah/fold_3/templates.yaml +++ b/promptsource/templates/codah/fold_3/templates.yaml @@ -24,6 +24,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +54,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +84,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +116,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - 
en metrics: - Accuracy original_task: true @@ -137,6 +145,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -160,6 +170,8 @@ templates: {{answer_choices[question_category]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -184,8 +196,10 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true name: interrogative_instruction_before_sentence_and_choices - reference: '' \ No newline at end of file + reference: '' diff --git a/promptsource/templates/codah/fold_4/templates.yaml b/promptsource/templates/codah/fold_4/templates.yaml index 442248238..9619b9ed7 100644 --- a/promptsource/templates/codah/fold_4/templates.yaml +++ b/promptsource/templates/codah/fold_4/templates.yaml @@ -24,6 +24,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +54,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +84,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +116,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +145,8 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -160,6 +170,8 @@ templates: {{answer_choices[question_category]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -184,8 +196,10 @@ templates: {{ answer_choices[correct_answer_idx] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true name: interrogative_instruction_before_sentence_and_choices - reference: '' \ No newline at end of file + reference: '' diff --git a/promptsource/templates/code_x_glue_tc_text_to_code/templates.yaml b/promptsource/templates/code_x_glue_tc_text_to_code/templates.yaml index e8459be6e..9ef7977fe 100644 --- a/promptsource/templates/code_x_glue_tc_text_to_code/templates.yaml +++ b/promptsource/templates/code_x_glue_tc_text_to_code/templates.yaml @@ -18,6 +18,8 @@ templates: \ |||\n {{code}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - BLEU diff --git a/promptsource/templates/common_gen/templates.yaml b/promptsource/templates/common_gen/templates.yaml index 7d8f8b042..eeb096797 100644 --- a/promptsource/templates/common_gen/templates.yaml +++ b/promptsource/templates/common_gen/templates.yaml @@ -7,6 +7,8 @@ templates: Generate a sentence with all the concepts :\n|||\n{{target}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -24,6 +26,8 @@ templates: {{target}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -37,6 +41,8 @@ templates: \ Use {{concepts | join(\", \")}} to restrict the output sentence.\n|||\n{{target}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ 
-52,6 +58,8 @@ templates: {{target}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -69,6 +77,8 @@ templates: {{ concepts | join(", ") }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -82,6 +92,8 @@ templates: {{ concepts | join(\", \") }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -98,6 +110,8 @@ templates: {{target}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -113,6 +127,8 @@ templates: \ a simple sentence can be \n|||\n{{target}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -126,6 +142,8 @@ templates: \ a sentence with all the concepts :\n|||\n{{target}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/commonsense_qa/templates.yaml b/promptsource/templates/commonsense_qa/templates.yaml index de73c748a..974c03a2e 100644 --- a/promptsource/templates/commonsense_qa/templates.yaml +++ b/promptsource/templates/commonsense_qa/templates.yaml @@ -14,6 +14,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +36,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -64,6 +68,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -87,6 +93,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -111,6 +119,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/conv_ai/templates.yaml b/promptsource/templates/conv_ai/templates.yaml index b2ca2b4e5..793f25a79 100644 --- a/promptsource/templates/conv_ai/templates.yaml +++ b/promptsource/templates/conv_ai/templates.yaml @@ -23,7 +23,8 @@ templates: {% if "Alice" == eval["userId"] %} - {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% endif %} @@ -32,6 +33,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -44,7 +47,8 @@ templates: %} "{{context}}" - Given the previous context, would you say Bob''s engagement is real in this conversation: + Given the previous context, would you say Bob''s engagement is real in this + conversation: {% for utterance in thread %} @@ -58,7 +62,8 @@ templates: {% if "Bob" == eval["userId"] %} - {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% endif %} @@ -67,6 +72,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -93,11 +100,14 @@ templates: ||| - {% if alice == bob %}{{ answer_choices[2] }}{% elif alice < bob %}{{ answer_choices[1] }}{% else %}{{ answer_choices[0] }}{% endif %} + {% if alice == bob %}{{ 
answer_choices[2] }}{% elif alice < bob %}{{ answer_choices[1] + }}{% else %}{{ answer_choices[0] }}{% endif %} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -110,8 +120,8 @@ templates: %}{{context}} - Given the preceding context, do Bob''s following utterances show that he was interested - in the context? + Given the preceding context, do Bob''s following utterances show that he was + interested in the context? {% for utterance in thread %} @@ -127,7 +137,8 @@ templates: {% if "Bob" == eval["userId"] %} - {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% endif %} @@ -136,6 +147,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -165,7 +178,8 @@ templates: {% if "Alice" == eval["userId"] %} - {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% endif %} @@ -174,6 +188,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -186,25 +202,28 @@ templates: %} {% set bob = (evaluation|selectattr("userId", "equalto", "Bob")|first)["engagement"] %} {% if (0 < (thread | selectattr("userId", "equalto", "Bob") | list | length)) and (0 < (thread | selectattr("userId", "equalto", "Alice") | list | length)) - %} - Context: {{context}} + %} Context: {{context}} Conversation: {% for utterance in thread %} {{ utterance["userId"] }}: {{ utterance["text"] }} {% endfor %} - - Given the previous context and conversation, who do you think is more engaged in this conversation - ({{ answer_choices[0] }}, {{ answer_choices[1] }}, or {{ answer_choices[2] }}): + + Given the previous context and conversation, who do you think is more engaged + in this conversation ({{ answer_choices[0] }}, {{ answer_choices[1] }}, or {{ + answer_choices[2] }}): ||| - {% if alice == bob %}{{ answer_choices[2] }}{% elif alice < bob %}{{ answer_choices[1] }}{% else %}{{ answer_choices[0] }}{% endif %} + {% if alice == bob %}{{ answer_choices[2] }}{% elif alice < bob %}{{ answer_choices[1] + }}{% else %}{{ answer_choices[0] }}{% endif %} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/conv_ai_2/templates.yaml b/promptsource/templates/conv_ai_2/templates.yaml index cdd662bb3..fda9271e7 100644 --- a/promptsource/templates/conv_ai_2/templates.yaml +++ b/promptsource/templates/conv_ai_2/templates.yaml @@ -27,11 +27,14 @@ templates: in a conversation?||| - {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -66,11 +69,14 @@ templates: What''s your guess: {{ answer_choices[0] }} or {{ answer_choices[1] }}?||| - {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% 
endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -103,11 +109,14 @@ templates: Do you agree?||| - {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -138,11 +147,14 @@ templates: {{ answer_choices[0] }} of {{ answer_choices[1] }}?||| - {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -175,11 +187,14 @@ templates: from a person in a conversation?||| - {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{% endif %} + {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] + }}{% endif %} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/conv_ai_3/templates.yaml b/promptsource/templates/conv_ai_3/templates.yaml index 7b1a78cbe..1b5c97bd1 100644 --- a/promptsource/templates/conv_ai_3/templates.yaml +++ b/promptsource/templates/conv_ai_3/templates.yaml @@ -8,6 +8,8 @@ templates: %}{{ answer_choices[1] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -26,6 +28,8 @@ templates: {{ answer_choices[clarification_need-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -39,6 +43,8 @@ templates: else %}{{ answer_choices[1] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -52,6 +58,8 @@ templates: %}{{ answer_choices[1] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -65,6 +73,8 @@ templates: }}{% else %}{{ answer_choices[1] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -87,6 +97,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -105,6 +117,8 @@ templates: {{ answer_choices[clarification_need-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/coqa/templates.yaml b/promptsource/templates/coqa/templates.yaml index d46dc7610..0d1d61ef5 100644 --- a/promptsource/templates/coqa/templates.yaml +++ b/promptsource/templates/coqa/templates.yaml @@ -8,6 +8,8 @@ templates: ][0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -41,6 +43,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -66,6 +70,8 @@ templates: {% endfor %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -85,6 +91,8 @@ templates: {{story[answers["answer_start"][0] : answers["answer_end"][0] ]}}' metadata: 
!TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -100,6 +108,8 @@ templates: ||| \n{{answers[\"input_text\"][missing_idx]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/cord19/metadata/templates.yaml b/promptsource/templates/cord19/metadata/templates.yaml index 1bd974b45..04c0a3f63 100644 --- a/promptsource/templates/cord19/metadata/templates.yaml +++ b/promptsource/templates/cord19/metadata/templates.yaml @@ -11,6 +11,8 @@ templates: {{ title }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -24,6 +26,8 @@ templates: \ paper on Covid-19 with the previous title |||\n {{ abstract }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -41,6 +45,8 @@ templates: {{ abstract }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -54,6 +60,8 @@ templates: \ on Covid-19?\nTitle:|||\n{{ title }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -71,6 +79,8 @@ templates: {{ abstract }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/cos_e/v1.0/templates.yaml b/promptsource/templates/cos_e/v1.0/templates.yaml index 776190b56..43656a604 100644 --- a/promptsource/templates/cos_e/v1.0/templates.yaml +++ b/promptsource/templates/cos_e/v1.0/templates.yaml @@ -17,6 +17,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -31,6 +33,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -51,6 +55,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -73,6 +79,8 @@ templates: {{ abstractive_explanation }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -97,6 +105,8 @@ templates: {{ abstractive_explanation }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -110,6 +120,8 @@ templates: \nThe rationale to choose \"{{answer}}\" as the answer is that: |||\n{{abstractive_explanation}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -134,6 +146,8 @@ templates: {{ answer_choices[choices.index(answer)] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -155,6 +169,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -180,6 +196,8 @@ templates: {{ answer_choices[choices.index(answer)] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -202,6 +220,8 @@ templates: {{ abstractive_explanation }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -216,6 +236,8 @@ templates: \ aligned with human common sense? 
\n|||\n{{ abstractive_explanation }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/cos_e/v1.11/templates.yaml b/promptsource/templates/cos_e/v1.11/templates.yaml index d0a15dfbe..2c2cd8254 100644 --- a/promptsource/templates/cos_e/v1.11/templates.yaml +++ b/promptsource/templates/cos_e/v1.11/templates.yaml @@ -17,6 +17,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +33,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -43,6 +47,8 @@ templates: \nThe rationale to choose \"{{answer}}\" as the answer is that: |||\n{{abstractive_explanation}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -64,6 +70,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +85,8 @@ templates: \ aligned with human common sense? \n|||\n{{ abstractive_explanation }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -103,6 +113,8 @@ templates: {{ answer_choices[choices.index(answer)] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -125,6 +137,8 @@ templates: {{ abstractive_explanation }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -148,6 +162,8 @@ templates: {{ abstractive_explanation }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -170,6 +186,8 @@ templates: {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -193,6 +211,8 @@ templates: {{ abstractive_explanation }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -217,6 +237,8 @@ templates: {{ answer_choices[choices.index(answer)] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/cosmos_qa/templates.yaml b/promptsource/templates/cosmos_qa/templates.yaml index 9e417bfb5..5fe9c1b39 100644 --- a/promptsource/templates/cosmos_qa/templates.yaml +++ b/promptsource/templates/cosmos_qa/templates.yaml @@ -9,6 +9,8 @@ templates: \ %}\n{{answer3}}\n{% endif %}\n|||\n{{question}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -23,6 +25,8 @@ templates: \ | join(\"\\n - \") }}\n|||\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +48,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +64,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -82,6 +90,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -107,6 +117,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy 
original_task: true @@ -126,6 +138,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -145,6 +159,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -176,6 +192,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -203,6 +221,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -222,6 +242,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -244,6 +266,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -255,6 +279,8 @@ templates: jinja: "{{question}} \n|||\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/covid_qa_castorini/templates.yaml b/promptsource/templates/covid_qa_castorini/templates.yaml index 120053ba1..cc3f84fe5 100644 --- a/promptsource/templates/covid_qa_castorini/templates.yaml +++ b/promptsource/templates/covid_qa_castorini/templates.yaml @@ -12,6 +12,8 @@ templates: {{question_query}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -29,6 +31,8 @@ templates: {{keyword_query}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -46,6 +50,8 @@ templates: {{question_query}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/craffel/openai_lambada/templates.yaml b/promptsource/templates/craffel/openai_lambada/templates.yaml index 6e391170b..86e7ab904 100644 --- a/promptsource/templates/craffel/openai_lambada/templates.yaml +++ b/promptsource/templates/craffel/openai_lambada/templates.yaml @@ -9,6 +9,8 @@ templates: What comes after the ellipses? ||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -23,6 +25,8 @@ templates: {{ text.split()[:-1] | join('' '') }} ____. 
||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -37,6 +41,8 @@ templates: Fill in the ____: ||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -51,6 +57,8 @@ templates: {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -65,6 +73,8 @@ templates: {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/craigslist_bargains/templates.yaml b/promptsource/templates/craigslist_bargains/templates.yaml index d29d42000..76d9362c8 100644 --- a/promptsource/templates/craigslist_bargains/templates.yaml +++ b/promptsource/templates/craigslist_bargains/templates.yaml @@ -53,6 +53,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -112,6 +114,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -134,6 +138,8 @@ templates: ${{(agent_info[''Target''][0] - agent_info[''Target''][1]) | abs}}0' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -194,6 +200,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -264,6 +272,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -301,6 +311,8 @@ templates: {{nonempty_utterance[-1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/crows_pairs/templates.yaml b/promptsource/templates/crows_pairs/templates.yaml index 0e25c008e..85d01eac6 100644 --- a/promptsource/templates/crows_pairs/templates.yaml +++ b/promptsource/templates/crows_pairs/templates.yaml @@ -16,6 +16,8 @@ templates: {{answer_choices[stereo_antistereo]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -40,6 +42,8 @@ templates: {{answer_choices[stereo_antistereo]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -63,6 +67,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -84,6 +90,8 @@ templates: {{answer_choices[stereo_antistereo]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -105,6 +113,8 @@ templates: {{answer_choices[stereo_antistereo]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -128,6 +138,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -149,6 +161,8 @@ templates: {{answer_choices[stereo_antistereo]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -172,6 +186,8 @@ templates: 
{{answer_choices[bias_type]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/dbpedia_14/templates.yaml b/promptsource/templates/dbpedia_14/templates.yaml index 651b7eefd..2022b54dd 100644 --- a/promptsource/templates/dbpedia_14/templates.yaml +++ b/promptsource/templates/dbpedia_14/templates.yaml @@ -14,6 +14,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -30,6 +32,8 @@ templates: {{title}} - {{content}} ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +50,8 @@ templates: refers to which one? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -65,6 +71,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/discofuse/discofuse-sport/templates.yaml b/promptsource/templates/discofuse/discofuse-sport/templates.yaml index 706bbab0e..f6b964cda 100644 --- a/promptsource/templates/discofuse/discofuse-sport/templates.yaml +++ b/promptsource/templates/discofuse/discofuse-sport/templates.yaml @@ -9,6 +9,8 @@ templates: \ {{incoherent_second_sentence}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -34,6 +36,8 @@ templates: \ }}\n\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - AUC @@ -50,6 +54,8 @@ templates: \n {{incoherent_second_sentence}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -74,6 +80,8 @@ templates: {{coherent_first_sentence}} {{coherent_second_sentence}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -101,6 +109,8 @@ templates: {{coherent_first_sentence}} {{coherent_second_sentence}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -114,6 +124,8 @@ templates: \ the two sentences above?\n\n|||\n\n{{coherent_first_sentence}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -128,6 +140,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -152,6 +166,8 @@ templates: \n{{ answer_choices[poss_ans_list.index(discourse_type)] }}\n\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - AUC @@ -166,6 +182,8 @@ templates: \ {{incoherent_second_sentence}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -182,6 +200,8 @@ templates: \n {{incoherent_first_sentence}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml b/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml index 09a0e7ed6..e1e69a649 100644 --- a/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml +++ b/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml @@ 
-23,6 +23,8 @@ templates: {{coherent_first_sentence}} {{coherent_second_sentence}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -36,6 +38,8 @@ templates: \ {{incoherent_second_sentence}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -51,6 +55,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -66,6 +72,8 @@ templates: \n {{incoherent_second_sentence}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -82,6 +90,8 @@ templates: \n {{incoherent_first_sentence}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -107,6 +117,8 @@ templates: \n{{ answer_choices[poss_ans_list.index(discourse_type)] }}\n\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - AUC - Accuracy @@ -121,6 +133,8 @@ templates: \ {{incoherent_second_sentence}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -145,6 +159,8 @@ templates: {{coherent_first_sentence}} {{coherent_second_sentence}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -158,6 +174,8 @@ templates: \ the two sentences above?\n\n|||\n\n{{coherent_first_sentence}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -182,6 +200,8 @@ templates: \ }}\n\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - AUC diff --git a/promptsource/templates/discovery/discovery/templates.yaml b/promptsource/templates/discovery/discovery/templates.yaml index 28b85a230..194139f02 100644 --- a/promptsource/templates/discovery/discovery/templates.yaml +++ b/promptsource/templates/discovery/discovery/templates.yaml @@ -39,6 +39,8 @@ templates: \ }}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -83,6 +85,8 @@ templates: \ join(\"\\n- \") }}\n\n|||\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -128,6 +132,8 @@ templates: \ }}\n\n|||\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -173,6 +179,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -217,6 +225,8 @@ templates: |||\n{{ answer_choices [label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/docred/templates.yaml b/promptsource/templates/docred/templates.yaml index 8a7e900eb..b289456a2 100644 --- a/promptsource/templates/docred/templates.yaml +++ b/promptsource/templates/docred/templates.yaml @@ -36,6 +36,8 @@ templates: \ \"}}{{ miscs | unique | join(\", \")}}{{\".\"}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -60,6 +62,8 @@ templates: }}{{type}}\n{% endfor %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: 
- Other original_task: false @@ -77,6 +81,8 @@ templates: \ \n|||\n{{ locations| unique | join(\", \")}}{{\".\"}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -95,6 +101,8 @@ templates: .\"}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -113,6 +121,8 @@ templates: \ | join(\" \") }}{{\" \"}}\n{%- endfor -%} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -133,6 +143,8 @@ templates: \ endfor -%} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -150,6 +162,8 @@ templates: \ \n|||\n{{ times| unique | join(\", \")}}{{\".\"}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -167,6 +181,8 @@ templates: \ \n|||\n{{ people| unique | join(\", \")}}{{\".\"}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -185,6 +201,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -203,6 +221,8 @@ templates: .\"}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false diff --git a/promptsource/templates/dream/templates.yaml b/promptsource/templates/dream/templates.yaml index 79a02983f..2e5765dca 100644 --- a/promptsource/templates/dream/templates.yaml +++ b/promptsource/templates/dream/templates.yaml @@ -16,6 +16,8 @@ templates: {{dialogue[-1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -33,6 +35,8 @@ templates: {{dialogue | join("\n\n")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -52,6 +56,8 @@ templates: {{dialogue[0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -66,6 +72,8 @@ templates: |||\n{{answer}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +87,8 @@ templates: \n- {{answer_choices[1]}}\n\n- {{answer_choices[2]}}\n|||\n{{answer}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/drop/templates.yaml b/promptsource/templates/drop/templates.yaml index b43e2f904..ef3944a07 100644 --- a/promptsource/templates/drop/templates.yaml +++ b/promptsource/templates/drop/templates.yaml @@ -16,6 +16,8 @@ templates: ||| {{ answers_spans.spans | join(", ") }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -30,6 +32,8 @@ templates: \n{{passage}} \n\n||| {{ answers_spans.spans | join(\", \") }}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -46,6 +50,8 @@ templates: Answer: ||| {{ answers_spans.spans | join(", ") }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -67,6 +73,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -88,6 +96,8 @@ templates: ||| {{ answers_spans.spans | join(", ") }}' metadata: !TemplateMetadata 
choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other diff --git a/promptsource/templates/duorc/ParaphraseRC/templates.yaml b/promptsource/templates/duorc/ParaphraseRC/templates.yaml index 016919e38..32e8f6a0a 100644 --- a/promptsource/templates/duorc/ParaphraseRC/templates.yaml +++ b/promptsource/templates/duorc/ParaphraseRC/templates.yaml @@ -15,6 +15,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -47,6 +49,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -77,6 +81,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -108,6 +114,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -123,6 +131,8 @@ templates: {{ question }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -154,6 +164,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -169,6 +181,8 @@ templates: {{title}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -201,6 +215,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -214,6 +230,8 @@ templates: |||\n{{question}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/duorc/SelfRC/templates.yaml b/promptsource/templates/duorc/SelfRC/templates.yaml index 198279547..6a7a94929 100644 --- a/promptsource/templates/duorc/SelfRC/templates.yaml +++ b/promptsource/templates/duorc/SelfRC/templates.yaml @@ -9,6 +9,8 @@ templates: |||\n{{question}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -42,6 +44,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -72,6 +76,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -87,6 +93,8 @@ templates: {{ question }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -119,6 +127,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -138,6 +148,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -169,6 +181,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -184,6 +198,8 @@ templates: {{title}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -216,6 +232,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/e2e_nlg_cleaned/templates.yaml b/promptsource/templates/e2e_nlg_cleaned/templates.yaml index 778e07e35..c45422970 100644 --- a/promptsource/templates/e2e_nlg_cleaned/templates.yaml +++ 
b/promptsource/templates/e2e_nlg_cleaned/templates.yaml @@ -16,6 +16,8 @@ templates: ||| {{human_reference}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -46,6 +48,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -77,6 +81,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -97,6 +103,8 @@ templates: Generate some text about this restaurant. ||| {{human_reference}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -129,6 +137,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -158,6 +168,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -189,6 +201,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -205,6 +219,8 @@ templates: ||| {{human_reference}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -233,6 +249,8 @@ templates: {% endif %} ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -264,6 +282,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -278,6 +298,8 @@ templates: {{meaning_representation}} ||| {{human_reference}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -310,6 +332,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml b/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml index 3601744f7..4e12360cf 100644 --- a/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml +++ b/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml @@ -15,6 +15,8 @@ templates: {{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels | length] | min]]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -32,6 +34,8 @@ templates: \ | length] | min]]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -56,6 +60,8 @@ templates: {{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels | length] | min]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -77,6 +83,8 @@ templates: {{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels | length] | min]]}}.' 
metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -101,6 +109,8 @@ templates: {{ answer_choices[{1:1 ,2:1 ,3:0 ,4:0}[[4,labels | length] | min]] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: false @@ -140,6 +150,8 @@ templates: {{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels | length] | min]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/emo/templates.yaml b/promptsource/templates/emo/templates.yaml index e6997752d..a697d18bf 100644 --- a/promptsource/templates/emo/templates.yaml +++ b/promptsource/templates/emo/templates.yaml @@ -10,6 +10,8 @@ templates: \ [label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -26,6 +28,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -52,6 +56,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -66,6 +72,8 @@ templates: \ {{answer_choices[3]}} or {{answer_choices[0]}} \n\n|||\n\n{{answer_choices[label]}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -85,6 +93,8 @@ templates: {{answer_choices[2]}}, {{answer_choices[3]}} or {{answer_choices[0]}}? ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -101,6 +111,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -117,6 +129,8 @@ templates: \ [label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -142,6 +156,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -165,6 +181,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -190,6 +208,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other diff --git a/promptsource/templates/emotion/templates.yaml b/promptsource/templates/emotion/templates.yaml index 0032964c9..ae19b8811 100644 --- a/promptsource/templates/emotion/templates.yaml +++ b/promptsource/templates/emotion/templates.yaml @@ -10,6 +10,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -25,6 +27,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -37,6 +41,8 @@ templates: \ [label] }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +58,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -65,6 +73,8 @@ templates: }}, \n\nthe emotion in the message is \n|||\n{{ answer_choices [label] }}'" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +90,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata 
choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/enriched_web_nlg/en/templates.yaml b/promptsource/templates/enriched_web_nlg/en/templates.yaml index 07258554e..4e9c38a42 100644 --- a/promptsource/templates/enriched_web_nlg/en/templates.yaml +++ b/promptsource/templates/enriched_web_nlg/en/templates.yaml @@ -9,9 +9,11 @@ templates: {{lex.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - - BLEU - - ROUGE + - BLEU + - ROUGE original_task: true name: Non-explicit Description reference: 'Triple set: lexicalization' @@ -22,9 +24,11 @@ templates: | join(", ")}} ||| {{lex.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - - BLEU - - ROUGE + - BLEU + - ROUGE original_task: true name: Verbalize + Specify Commas reference: 'Instruction: verbalization' @@ -37,21 +41,26 @@ templates: {{lex.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - - BLEU - - ROUGE + - BLEU + - ROUGE original_task: true name: Explicit Graph Description reference: 'Explicit Graph Description: verbalization.' e80f68dd-ebd0-4cbc-960d-bb28aff2d2d4: !Template answer_choices: null id: e80f68dd-ebd0-4cbc-960d-bb28aff2d2d4 - jinja: 'Take the following Table to text task comprising semantic triples (RDF triples), where each element of - a triple is separated by "|" and each triple set by ",": {{modified_triple_sets.mtriple_set[0] - | join(", ")}}. Make a verbalization of the triple set into plain text, which fully and accurately describes the Table. ||| - {{lex.text | choice}}' + jinja: 'Take the following Table to text task comprising semantic triples (RDF + triples), where each element of a triple is separated by "|" and each triple + set by ",": {{modified_triple_sets.mtriple_set[0] | join(", ")}}. Make a verbalization + of the triple set into plain text, which fully and accurately describes the + Table. 
||| {{lex.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/esnli/templates.yaml b/promptsource/templates/esnli/templates.yaml index 39f3bd78f..7d1f2ca61 100755 --- a/promptsource/templates/esnli/templates.yaml +++ b/promptsource/templates/esnli/templates.yaml @@ -17,6 +17,8 @@ templates: {{ answer_choices |select("!=","") |list |choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true @@ -33,6 +35,8 @@ templates: |||\n{{ answer_choices |select(\"!=\",\"\") |list |choice }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true @@ -47,6 +51,8 @@ templates: \ |select(\"!=\",\"\") |list |choice }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true @@ -69,6 +75,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +99,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -113,6 +123,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -135,6 +147,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -147,6 +161,8 @@ templates: \ }}?\n|||\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -160,6 +176,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -174,6 +192,8 @@ templates: |||\n{{ answer_choices |select(\"!=\",\"\") |list |choice }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true diff --git a/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml b/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml index ca98d6a60..973f5cd8c 100644 --- a/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml +++ b/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml @@ -16,6 +16,8 @@ templates: \nthe intervention was\n\n{% endif %}\n\n|||\n\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -37,6 +39,8 @@ templates: \ was:\n\n{% endif %}\n\n|||\n\n{{Prompts.Comparator[specific_sub_annotation]}}." 
metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -63,6 +67,8 @@ templates: \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -87,6 +93,8 @@ templates: \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +116,8 @@ templates: \nthe outcome was\n\n{% endif %}\n\n|||\n\n{{Prompts.Outcome[specific_sub_annotation]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml b/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml index 4d9f65f8d..694224cf3 100644 --- a/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml +++ b/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml @@ -16,6 +16,8 @@ templates: \nthe intervention was\n\n{% endif %}\n\n|||\n\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -37,6 +39,8 @@ templates: \ was:\n\n{% endif %}\n\n|||\n\n{{Prompts.Comparator[specific_sub_annotation]}}." metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -61,6 +65,8 @@ templates: \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -87,6 +93,8 @@ templates: \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +116,8 @@ templates: \nthe outcome was\n\n{% endif %}\n\n|||\n\n{{Prompts.Outcome[specific_sub_annotation]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/fever/v1.0/templates.yaml b/promptsource/templates/fever/v1.0/templates.yaml index d07d159f1..d25b27a3b 100644 --- a/promptsource/templates/fever/v1.0/templates.yaml +++ b/promptsource/templates/fever/v1.0/templates.yaml @@ -9,6 +9,8 @@ templates: }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -22,6 +24,8 @@ templates: \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -36,6 +40,8 @@ templates: \n}[label]\n}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -49,6 +55,8 @@ templates: }}\n{% endif %}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -62,6 +70,8 @@ templates: \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/fever/v2.0/templates.yaml b/promptsource/templates/fever/v2.0/templates.yaml index 0e89ea48e..f623b64e8 100644 --- a/promptsource/templates/fever/v2.0/templates.yaml +++ b/promptsource/templates/fever/v2.0/templates.yaml @@ -9,6 +9,8 @@ templates: }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - 
Accuracy original_task: false @@ -22,6 +24,8 @@ templates: \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -36,6 +40,8 @@ templates: \n}[label]\n}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -49,6 +55,8 @@ templates: }}\n{% endif %}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -62,6 +70,8 @@ templates: \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml b/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml index bc8504f25..1804b02ac 100644 --- a/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml +++ b/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml @@ -14,6 +14,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -31,6 +33,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -49,6 +53,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -66,6 +72,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -84,6 +92,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -102,6 +112,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -120,6 +132,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -138,6 +152,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -156,6 +172,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other diff --git a/promptsource/templates/freebase_qa/templates.yaml b/promptsource/templates/freebase_qa/templates.yaml index ed38ca84d..90ffcaa59 100644 --- a/promptsource/templates/freebase_qa/templates.yaml +++ b/promptsource/templates/freebase_qa/templates.yaml @@ -13,6 +13,8 @@ templates: \ \") }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -35,6 +37,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -48,6 +52,8 @@ templates: capitalize\") | join(\" \") }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -63,6 +69,8 @@ templates: \ \") | map(\"capitalize\") | join(\" \") }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -78,6 +86,8 @@ templates: .\") | last | capitalize | replace(\"_\", \" 
\")}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/generated_reviews_enth/templates.yaml b/promptsource/templates/generated_reviews_enth/templates.yaml index 29c77f2a1..31c85b55a 100644 --- a/promptsource/templates/generated_reviews_enth/templates.yaml +++ b/promptsource/templates/generated_reviews_enth/templates.yaml @@ -7,6 +7,8 @@ templates: if review_star<3 else 1]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - AUC - Accuracy @@ -16,10 +18,12 @@ templates: 95136948-3402-4bd4-8a69-1aa7b85461cc: !Template answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5 id: 95136948-3402-4bd4-8a69-1aa7b85461cc - jinja: 'Rate the positivity of this review ({{"1"}} being the lowest and {{"5"}} - the highest).\n"{{translation.en}}" ||| {{review_star}}' + jinja: Rate the positivity of this review ({{"1"}} being the lowest and {{"5"}} + the highest).\n"{{translation.en}}" ||| {{review_star}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - AUC - Accuracy @@ -33,6 +37,8 @@ templates: \ {{\"0\"}} and {{\"5\"}}. ||| {{review_star}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - AUC @@ -46,6 +52,8 @@ templates: ||| {{review_star}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - AUC - Accuracy @@ -59,6 +67,8 @@ templates: if review_star < 3 else 1]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - AUC - Accuracy diff --git a/promptsource/templates/gigaword/templates.yaml b/promptsource/templates/gigaword/templates.yaml index 3a3fbe7e5..c2721b4db 100644 --- a/promptsource/templates/gigaword/templates.yaml +++ b/promptsource/templates/gigaword/templates.yaml @@ -12,6 +12,8 @@ templates: Generate a title for this article: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE @@ -27,6 +29,8 @@ templates: ||| {{document}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE @@ -42,6 +46,8 @@ templates: {{summary}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE @@ -57,6 +63,8 @@ templates: Title: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -72,6 +80,8 @@ templates: TL;DR: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -90,6 +100,8 @@ templates: Given the above sentence, write its title: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE @@ -102,6 +114,8 @@ templates: jinja: "Write a title for this sentence: {{document}} \n\nTitle: ||| {{summary}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -114,6 +128,8 @@ templates: jinja: '{{document}} In a nutshell, ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE @@ -132,6 +148,8 @@ templates: Write an article with the given title: ||| {{document}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/glue/ax/templates.yaml b/promptsource/templates/glue/ax/templates.yaml index 4712ae9d2..45103f155 100644 --- 
a/promptsource/templates/glue/ax/templates.yaml +++ b/promptsource/templates/glue/ax/templates.yaml @@ -16,6 +16,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +33,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -50,6 +54,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -68,6 +74,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +99,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/cola/templates.yaml b/promptsource/templates/glue/cola/templates.yaml index e6e6a244e..4fc6e828a 100644 --- a/promptsource/templates/glue/cola/templates.yaml +++ b/promptsource/templates/glue/cola/templates.yaml @@ -14,6 +14,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +33,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +56,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -70,6 +76,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -88,6 +96,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/mnli/templates.yaml b/promptsource/templates/glue/mnli/templates.yaml index 1366a82f2..5f1101939 100644 --- a/promptsource/templates/glue/mnli/templates.yaml +++ b/promptsource/templates/glue/mnli/templates.yaml @@ -8,6 +8,8 @@ templates: {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: or maybe? 
||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +52,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -60,6 +68,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -72,6 +82,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -84,6 +96,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -97,6 +111,8 @@ templates: \ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +125,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -121,6 +139,8 @@ templates: \ no, or maybe? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -134,6 +154,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -148,6 +170,8 @@ templates: {{"inconclusive"}}? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -160,6 +184,8 @@ templates: no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -172,6 +198,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -185,6 +213,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/mnli_matched/templates.yaml b/promptsource/templates/glue/mnli_matched/templates.yaml index 2fe1a26e9..9491e0c32 100644 --- a/promptsource/templates/glue/mnli_matched/templates.yaml +++ b/promptsource/templates/glue/mnli_matched/templates.yaml @@ -8,6 +8,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -22,6 +24,8 @@ templates: {{"inconclusive"}}? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +38,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -47,6 +53,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +67,8 @@ templates: or maybe? 
||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -72,6 +82,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -84,6 +96,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -96,6 +110,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +125,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -123,6 +141,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -136,6 +156,8 @@ templates: no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -148,6 +170,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -160,6 +184,8 @@ templates: \ no, or maybe? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -173,6 +199,8 @@ templates: \ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -185,6 +213,8 @@ templates: {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/mnli_mismatched/templates.yaml b/promptsource/templates/glue/mnli_mismatched/templates.yaml index 4bfb00b4d..f46363306 100644 --- a/promptsource/templates/glue/mnli_mismatched/templates.yaml +++ b/promptsource/templates/glue/mnli_mismatched/templates.yaml @@ -10,6 +10,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -23,6 +25,8 @@ templates: {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -35,6 +39,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -49,6 +55,8 @@ templates: {{"inconclusive"}}? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +69,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -74,6 +84,8 @@ templates: \ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +98,8 @@ templates: or maybe? 
||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -98,6 +112,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +126,8 @@ templates: \ no, or maybe? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,6 +140,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -135,6 +155,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -147,6 +169,8 @@ templates: no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -159,6 +183,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -172,6 +198,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -185,6 +213,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/mrpc/templates.yaml b/promptsource/templates/glue/mrpc/templates.yaml index af78b1eef..1bc47af3c 100644 --- a/promptsource/templates/glue/mrpc/templates.yaml +++ b/promptsource/templates/glue/mrpc/templates.yaml @@ -15,6 +15,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -37,6 +39,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +62,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +83,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -96,6 +104,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -120,6 +130,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -139,6 +151,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/qnli/templates.yaml b/promptsource/templates/glue/qnli/templates.yaml index 70440f180..dd6a0b37f 100644 --- a/promptsource/templates/glue/qnli/templates.yaml +++ b/promptsource/templates/glue/qnli/templates.yaml @@ -13,6 +13,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -30,6 +32,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -45,6 
+49,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -65,6 +71,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -89,6 +97,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/qqp/templates.yaml b/promptsource/templates/glue/qqp/templates.yaml index 9fc1f4134..44063e98b 100644 --- a/promptsource/templates/glue/qqp/templates.yaml +++ b/promptsource/templates/glue/qqp/templates.yaml @@ -10,6 +10,8 @@ templates: }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -29,6 +31,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -41,6 +45,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -53,6 +59,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -69,6 +77,8 @@ templates: Do these two questions convey the same meaning? Yes or no? ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -81,6 +91,8 @@ templates: duplicates? ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/rte/templates.yaml b/promptsource/templates/glue/rte/templates.yaml index 0eb5fba79..db70dfcb2 100644 --- a/promptsource/templates/glue/rte/templates.yaml +++ b/promptsource/templates/glue/rte/templates.yaml @@ -13,6 +13,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -29,6 +31,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +56,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -68,6 +74,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -89,6 +97,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/sst2/templates.yaml b/promptsource/templates/glue/sst2/templates.yaml index 2dc6537d6..565e13f94 100644 --- a/promptsource/templates/glue/sst2/templates.yaml +++ b/promptsource/templates/glue/sst2/templates.yaml @@ -10,6 +10,8 @@ templates: answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -25,6 +27,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -40,6 +44,8 @@ templates: }}' metadata: !TemplateMetadata 
choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -57,6 +63,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -69,6 +77,8 @@ templates: they are feeling {{"good"}} or {{"bad"}}? ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/glue/stsb/templates.yaml b/promptsource/templates/glue/stsb/templates.yaml index fc74387ae..b76c47b44 100644 --- a/promptsource/templates/glue/stsb/templates.yaml +++ b/promptsource/templates/glue/stsb/templates.yaml @@ -8,6 +8,8 @@ templates: and "{{sentence2}}" are. ||| {{ (((5*label) | round )/5) }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation - Spearman Correlation @@ -28,6 +30,8 @@ templates: {{ (((5*label) | round )/5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation - Spearman Correlation @@ -51,6 +55,8 @@ templates: {{ (((5*label) | round )/5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation - Spearman Correlation @@ -78,6 +84,8 @@ templates: {{ (((5*label) | round )/5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation - Spearman Correlation @@ -101,6 +109,8 @@ templates: {{ (((5*label) | round )/5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation - Spearman Correlation diff --git a/promptsource/templates/glue/wnli/templates.yaml b/promptsource/templates/glue/wnli/templates.yaml index 0bf21f4a4..26e6800c3 100644 --- a/promptsource/templates/glue/wnli/templates.yaml +++ b/promptsource/templates/glue/wnli/templates.yaml @@ -19,6 +19,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -39,6 +41,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +62,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -74,6 +80,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -93,6 +101,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/google_wellformed_query/templates.yaml b/promptsource/templates/google_wellformed_query/templates.yaml index 46623250c..ac8867604 100644 --- a/promptsource/templates/google_wellformed_query/templates.yaml +++ b/promptsource/templates/google_wellformed_query/templates.yaml @@ -12,6 +12,8 @@ templates: {{ rating | round(0) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -30,6 +32,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +50,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy 
original_task: true @@ -62,6 +68,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +87,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -95,6 +105,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/great_code/templates.yaml b/promptsource/templates/great_code/templates.yaml index 83fc3237d..efc5d0da5 100644 --- a/promptsource/templates/great_code/templates.yaml +++ b/promptsource/templates/great_code/templates.yaml @@ -22,6 +22,8 @@ templates: {{ ns.target }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -64,6 +66,8 @@ templates: \ {{fixed_buggy_line_content}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -91,6 +95,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -115,6 +121,8 @@ templates: , False: \"No\"}[has_bug] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -146,6 +154,8 @@ templates: \ %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml b/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml index a7a6755b5..ff5ff1da5 100644 --- a/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml +++ b/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml @@ -16,6 +16,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +35,8 @@ templates: }}. |||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -46,6 +50,8 @@ templates: ,\n \"UK\",\n \"World\",\n \"Books\"\n][topic] }}. 
|||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -70,6 +76,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -82,6 +90,8 @@ templates: , \")}}.\n\n{{article}} \n|||\n{{answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -104,6 +114,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,6 +134,8 @@ templates: \ {{ article }} \n|||\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +151,8 @@ templates: \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} .\n|||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -153,6 +169,8 @@ templates: , \")}}.\n\n{{article}} \n\n|||\n\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -170,6 +188,8 @@ templates: |||\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml index a37691733..f2f8f00f4 100644 --- a/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml +++ b/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml @@ -16,6 +16,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +35,8 @@ templates: }}. |||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -46,6 +50,8 @@ templates: ,\n \"UK\",\n \"World\",\n \"Books\"\n][topic] }}. 
|||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -70,6 +76,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -82,6 +90,8 @@ templates: , \")}}.\n\n{{article}} \n|||\n{{answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -104,6 +114,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,6 +134,8 @@ templates: \ {{ article }} \n|||\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +151,8 @@ templates: \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} .\n|||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -153,6 +169,8 @@ templates: , \")}}.\n\n{{article}} \n\n|||\n\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -170,6 +188,8 @@ templates: |||\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml index cb2d68bbc..603306f65 100644 --- a/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml +++ b/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml @@ -16,6 +16,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +35,8 @@ templates: }}. |||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -46,6 +50,8 @@ templates: ,\n \"UK\",\n \"World\",\n \"Books\"\n][topic] }}. 
|||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -70,6 +76,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -82,6 +90,8 @@ templates: , \")}}.\n\n{{article}} \n|||\n{{answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -104,6 +114,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,6 +134,8 @@ templates: \ {{ article }} \n|||\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +151,8 @@ templates: \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} .\n|||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -153,6 +169,8 @@ templates: , \")}}.\n\n{{article}} \n\n|||\n\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -170,8 +188,10 @@ templates: |||\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true name: who_wrote_article_with_topic_hint - reference: '' \ No newline at end of file + reference: '' diff --git a/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml index 813e7ac50..44fae27b6 100644 --- a/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml +++ b/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml @@ -16,6 +16,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +35,8 @@ templates: }}. |||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -46,6 +50,8 @@ templates: ,\n \"UK\",\n \"World\",\n \"Books\"\n][topic] }}. 
|||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -70,6 +76,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -82,6 +90,8 @@ templates: , \")}}.\n\n{{article}} \n|||\n{{answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -104,6 +114,8 @@ templates: {{ answer_choices[author] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,6 +134,8 @@ templates: \ {{ article }} \n|||\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +151,8 @@ templates: \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} .\n|||\n{{article}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -153,6 +169,8 @@ templates: , \")}}.\n\n{{article}} \n\n|||\n\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -170,6 +188,8 @@ templates: |||\n{{ answer_choices[author] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/gutenberg_time/templates.yaml b/promptsource/templates/gutenberg_time/templates.yaml index bf1e3ba86..fbc9e9700 100644 --- a/promptsource/templates/gutenberg_time/templates.yaml +++ b/promptsource/templates/gutenberg_time/templates.yaml @@ -13,6 +13,8 @@ templates: {{time_phrase}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -37,6 +39,8 @@ templates: {{answer_choices[idx]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +62,8 @@ templates: {{answer_choices[0] if hour < 12 else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -72,6 +78,8 @@ templates: \ = hour_reference | int %}\n{{answer_choices[0] if hour < 12 else answer_choices[1]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -95,6 +103,8 @@ templates: {{answer_choices[idx]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -119,6 +129,8 @@ templates: {{answer_choices[idx]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/hans/templates.yaml b/promptsource/templates/hans/templates.yaml index 832237879..3406a03f7 100644 --- a/promptsource/templates/hans/templates.yaml +++ b/promptsource/templates/hans/templates.yaml @@ -7,6 +7,8 @@ templates: \ or no? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -19,6 +21,8 @@ templates: or no? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +35,8 @@ templates: Yes or no? 
||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -43,6 +49,8 @@ templates: {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -55,6 +63,8 @@ templates: no? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +77,8 @@ templates: Yes or no? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +92,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -92,6 +106,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +121,8 @@ templates: Question: {{hypothesis}} True or False? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -117,6 +135,8 @@ templates: {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/hate_speech18/templates.yaml b/promptsource/templates/hate_speech18/templates.yaml index 71b1a59c5..e94a2aa45 100644 --- a/promptsource/templates/hate_speech18/templates.yaml +++ b/promptsource/templates/hate_speech18/templates.yaml @@ -5,12 +5,14 @@ templates: id: 3266f9d4-9c80-4e17-a8a6-1fe44ca8f3bf jinja: '{% if label in [0, 1] %} - {{text}} Is the sentiment the sentence expresses is a {{answer_choices[1]}} speech - or {{answer_choices[0]}} speech? ||| {{answer_choices[label]}} + {{text}} Is the sentiment the sentence expresses is a {{answer_choices[1]}} + speech or {{answer_choices[0]}} speech? 
||| {{answer_choices[label]}} - {% endif %}' + {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/head_qa/en/templates.yaml b/promptsource/templates/head_qa/en/templates.yaml index e45b75781..669dd2b0e 100644 --- a/promptsource/templates/head_qa/en/templates.yaml +++ b/promptsource/templates/head_qa/en/templates.yaml @@ -26,6 +26,8 @@ templates: {{ra | string}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -58,6 +60,8 @@ templates: {%- endfor %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -90,6 +94,8 @@ templates: {{ra | string}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -121,6 +127,8 @@ templates: {%- endfor %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -151,6 +159,8 @@ templates: {%- endfor %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other @@ -182,6 +192,8 @@ templates: {%- endfor %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy - Other diff --git a/promptsource/templates/health_fact/templates.yaml b/promptsource/templates/health_fact/templates.yaml index 93c2db519..6a563a7d4 100644 --- a/promptsource/templates/health_fact/templates.yaml +++ b/promptsource/templates/health_fact/templates.yaml @@ -8,6 +8,8 @@ templates: \ proven yet that\"][label]}}:\n\n{{claim}}\n\nis true because of: |||\n\n{{explanation}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -22,6 +24,8 @@ templates: |||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +38,8 @@ templates: \ \n\nGiven the above, this claim is: |||\n {{answer_choices[label]}} " metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -64,6 +70,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -88,6 +96,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/hellaswag/templates.yaml b/promptsource/templates/hellaswag/templates.yaml index c3d90483b..09d540d85 100644 --- a/promptsource/templates/hellaswag/templates.yaml +++ b/promptsource/templates/hellaswag/templates.yaml @@ -24,6 +24,8 @@ templates: {{ answer_choices[label | int()] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -35,6 +37,8 @@ templates: jinja: "What is the topic of the sentence: {{ctx}} \n|||\n{{activity_label}} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -51,6 +55,8 @@ templates: {{answer_choices[label | int()]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -64,6 +70,8 @@ templates: \ paragraph? 
\n|||\n{{activity_label}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -107,6 +115,8 @@ templates: {{answer_choices [label | int()]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -121,6 +131,8 @@ templates: \ %}\n{{answer_choices[0]}}\n{% else %} \n{{answer_choices[1]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -153,6 +165,8 @@ templates: {{answer_choices [label | int()]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -170,6 +184,8 @@ templates: {{ctx}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -186,6 +202,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -214,6 +232,8 @@ templates: ||| {{ answer_choices[label | int()] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -228,6 +248,8 @@ templates: \ | int()] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/hlgd/templates.yaml b/promptsource/templates/hlgd/templates.yaml index 9a67d4ac4..a52f04378 100644 --- a/promptsource/templates/hlgd/templates.yaml +++ b/promptsource/templates/hlgd/templates.yaml @@ -10,6 +10,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -23,6 +25,8 @@ templates: \ answer_choices[1] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -48,6 +52,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -60,6 +66,8 @@ templates: \ for the same event? 
\n|||\n{% if label %}\n{{headline_b}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -76,6 +84,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -89,6 +99,8 @@ templates: {% else %}\n{{ answer_choices[1] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -113,6 +125,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +151,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -163,6 +179,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -178,6 +196,8 @@ templates: \ }}\n{% else %}\n{{ answer_choices[1] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -203,6 +223,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/hotpot_qa/distractor/templates.yaml b/promptsource/templates/hotpot_qa/distractor/templates.yaml index c91bea113..6f0cfebc2 100644 --- a/promptsource/templates/hotpot_qa/distractor/templates.yaml +++ b/promptsource/templates/hotpot_qa/distractor/templates.yaml @@ -12,6 +12,8 @@ templates: \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - COQA F1 - Other @@ -29,6 +31,8 @@ templates: \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - Other @@ -45,6 +49,8 @@ templates: \ | join(\"\")}}\n{% endfor %}\n||| \n{{answer}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -58,6 +64,8 @@ templates: \ %}\n||| \n{{question}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -76,6 +84,8 @@ templates: \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -90,6 +100,8 @@ templates: {% endfor %}\n||| \n{{context.title | join(\"; \")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -105,6 +117,8 @@ templates: \")}}\n{% endfor %}\n||| \n{{answer}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/hotpot_qa/fullwiki/templates.yaml b/promptsource/templates/hotpot_qa/fullwiki/templates.yaml index 68d96061b..9e8333075 100644 --- a/promptsource/templates/hotpot_qa/fullwiki/templates.yaml +++ b/promptsource/templates/hotpot_qa/fullwiki/templates.yaml @@ -9,6 +9,8 @@ templates: \")}}\n{% endfor %}\n||| \n{{answer}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -26,6 +28,8 @@ templates: \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -43,6 +47,8 @@ 
templates: \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - COQA F1 - Other @@ -60,6 +66,8 @@ templates: {{type}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -73,6 +81,8 @@ templates: {% endfor %}\n||| \n{{context.title | join(\"; \")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -88,6 +98,8 @@ templates: \ %}\n||| \n{{question}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -105,6 +117,8 @@ templates: \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - Other @@ -121,6 +135,8 @@ templates: \ | join(\"\")}}\n{% endfor %}\n||| \n{{answer}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/humicroedit/subtask-1/templates.yaml b/promptsource/templates/humicroedit/subtask-1/templates.yaml index 5fc2615d6..7451e1a8a 100644 --- a/promptsource/templates/humicroedit/subtask-1/templates.yaml +++ b/promptsource/templates/humicroedit/subtask-1/templates.yaml @@ -18,6 +18,8 @@ templates: {{ (((5 * meanGrade) | round) / 5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -43,6 +45,8 @@ templates: {{ (((5 * meanGrade) | round) / 5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -63,6 +67,8 @@ templates: {{ (((5 * meanGrade) | round) / 5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -79,6 +85,8 @@ templates: \ a high score means very funny.\n||| \n{{ (((5 * meanGrade) | round) / 5) }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -103,6 +111,8 @@ templates: {{ (((5 * meanGrade) | round) / 5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -129,6 +139,8 @@ templates: {{ (((5 * meanGrade) | round) / 5) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/humicroedit/subtask-2/templates.yaml b/promptsource/templates/humicroedit/subtask-2/templates.yaml index 72d9a0c37..24241c1ec 100644 --- a/promptsource/templates/humicroedit/subtask-2/templates.yaml +++ b/promptsource/templates/humicroedit/subtask-2/templates.yaml @@ -23,6 +23,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -42,6 +44,8 @@ templates: \ Equal\n|||\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -69,6 +73,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -98,6 +104,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -120,6 +128,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: 
+ - en metrics: - Accuracy original_task: true @@ -149,6 +159,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -168,6 +180,8 @@ templates: {{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -186,6 +200,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml b/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml index 68ebbdd29..848939d01 100644 --- a/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml +++ b/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml @@ -19,6 +19,8 @@ templates: {{answer_choices[0] if hyperpartisan else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -40,6 +42,8 @@ templates: {{answer_choices[0] if hyperpartisan else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +65,8 @@ templates: {{answer_choices[0] if hyperpartisan else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -82,6 +88,8 @@ templates: {{answer_choices[0] if hyperpartisan else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -104,6 +112,8 @@ templates: {{answer_choices[0] if hyperpartisan else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -128,6 +138,8 @@ templates: {{answer_choices[0] if hyperpartisan else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml b/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml index f43cda2c7..b428431b0 100644 --- a/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml +++ b/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml @@ -20,6 +20,8 @@ templates: {{ answer_choices[bias] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +46,8 @@ templates: {{ answer_choices[bias] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -70,6 +74,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -94,6 +100,8 @@ templates: {{ answer_choices[bias] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -121,6 +129,8 @@ templates: {{ answer_choices[bias] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/imdb/templates.yaml b/promptsource/templates/imdb/templates.yaml index dd6bb684f..92d2c93a4 100644 --- a/promptsource/templates/imdb/templates.yaml +++ 
b/promptsource/templates/imdb/templates.yaml @@ -7,6 +7,8 @@ templates: [label] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -19,6 +21,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +35,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -43,6 +49,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -55,6 +63,8 @@ templates: answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +77,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +91,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -90,6 +104,8 @@ templates: jinja: '{{text}} This is definitely not a ||| {{ answer_choices [1-label]}} review.' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -102,6 +118,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -114,6 +132,8 @@ templates: ||| {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +146,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/jfleg/templates.yaml b/promptsource/templates/jfleg/templates.yaml index 8d5a46cc2..c03bcebbd 100644 --- a/promptsource/templates/jfleg/templates.yaml +++ b/promptsource/templates/jfleg/templates.yaml @@ -3,8 +3,8 @@ templates: 18d3362c-74e1-4cda-9b16-001948d9196b: !Template answer_choices: null id: 18d3362c-74e1-4cda-9b16-001948d9196b - jinja: 'I am correcting the grammar exercises of my students. How should the - following sentence be re-written? + jinja: 'I am correcting the grammar exercises of my students. How should the following + sentence be re-written? 
{{sentence}} @@ -17,6 +17,8 @@ templates: {{corrections | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -41,6 +43,8 @@ templates: {{ corrections | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -65,6 +69,8 @@ templates: {{ corrections | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -84,6 +90,8 @@ templates: {{ corrections | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -106,6 +114,8 @@ templates: {{ corrections | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -129,6 +139,8 @@ templates: {{ corrections | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -142,6 +154,8 @@ templates: \ be \n\n|||\n\n{{ corrections | choice }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/jigsaw_unintended_bias/templates.yaml b/promptsource/templates/jigsaw_unintended_bias/templates.yaml index 3a47e830f..cae2735f1 100644 --- a/promptsource/templates/jigsaw_unintended_bias/templates.yaml +++ b/promptsource/templates/jigsaw_unintended_bias/templates.yaml @@ -14,6 +14,8 @@ templates: {{(target * 100.0) | round(0) }} ' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: true @@ -33,6 +35,8 @@ templates: {{answer_choices[target | round | int]}} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -47,6 +51,8 @@ templates: \ | round(1) }}" metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: true @@ -59,6 +65,8 @@ templates: ||| {{(target * 5.0) | round(1) }} metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: true @@ -79,6 +87,8 @@ templates: {{target | round(1) }} ' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: true @@ -99,6 +109,8 @@ templates: {{answer_choices[target | round | int]}} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -112,6 +124,8 @@ templates: \n||| \n\n{{answer_choices[target | round | int]}}\n\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -129,6 +143,8 @@ templates: {{threat | round(1)}} ' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: true @@ -145,6 +161,8 @@ templates: {{answer_choices[target | round | int]}} ' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: true @@ -163,6 +181,8 @@ templates: {{answer_choices[target | round | int]}} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -182,6 +202,8 @@ templates: {{target | round(1)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/kelm/templates.yaml b/promptsource/templates/kelm/templates.yaml index 73b4a9ebb..f6d723749 
100644 --- a/promptsource/templates/kelm/templates.yaml +++ b/promptsource/templates/kelm/templates.yaml @@ -10,6 +10,8 @@ templates: \ }} \n|||\n{{ sentence }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -25,6 +27,8 @@ templates: \nSentence: {{sentence}} \n|||\n{{triple}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -42,6 +46,8 @@ templates: \ {{ triple }} \n\n|||\n\n{{ sentence }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -60,6 +66,8 @@ templates: {{ sentence }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -75,6 +83,8 @@ templates: \nTriple: {{ triple }} \n|||\n{{ sentence }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -91,6 +101,8 @@ templates: \ What is the sentence?\n\nFacts: {{ triple }} \n\n|||\n\n{{ sentence }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml b/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml index 437291801..934b7e027 100644 --- a/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml +++ b/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml @@ -18,6 +18,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -37,6 +39,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -56,6 +60,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -78,6 +84,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -97,6 +105,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false diff --git a/promptsource/templates/kilt_tasks/nq/templates.yaml b/promptsource/templates/kilt_tasks/nq/templates.yaml index f5c364639..18bf44b0e 100644 --- a/promptsource/templates/kilt_tasks/nq/templates.yaml +++ b/promptsource/templates/kilt_tasks/nq/templates.yaml @@ -21,6 +21,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -44,6 +46,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -67,6 +71,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -88,6 +94,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -111,6 +119,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -132,6 +142,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -155,6 +167,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other diff --git 
a/promptsource/templates/lama/trex/templates.yaml b/promptsource/templates/lama/trex/templates.yaml index 2e9631bc1..30dfe500b 100644 --- a/promptsource/templates/lama/trex/templates.yaml +++ b/promptsource/templates/lama/trex/templates.yaml @@ -7,6 +7,8 @@ templates: jinja: '{{masked_sentence}} Fill the mask with the missing word. ||| {{obj_label}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -21,6 +23,8 @@ templates: ||| {{ template | replace("[X]",sub_label) | replace("[Y]", obj_surface)}} ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - BLEU @@ -34,6 +38,8 @@ templates: jinja: 'Replace the mask with the correct word: {{masked_sentence}} ||| {{obj_label}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -47,6 +53,8 @@ templates: \ | replace(\"[X]\",sub_surface) | replace(\"[Y]\", obj_surface)}} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - BLEU @@ -62,6 +70,8 @@ templates: [X]\",sub_surface) | replace(\"[Y]\", obj_surface)}} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - BLEU diff --git a/promptsource/templates/lambada/templates.yaml b/promptsource/templates/lambada/templates.yaml index e12c6abf6..371f6d37f 100644 --- a/promptsource/templates/lambada/templates.yaml +++ b/promptsource/templates/lambada/templates.yaml @@ -9,6 +9,8 @@ templates: {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -23,6 +25,8 @@ templates: Fill in the ____: ||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -37,6 +41,8 @@ templates: What comes after the ellipses? ||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -51,6 +57,8 @@ templates: {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -65,6 +73,8 @@ templates: {{ text.split()[:-1] | join('' '') }} ____. 
||| {{ text.split()[-1] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/liar/templates.yaml b/promptsource/templates/liar/templates.yaml index 2871664ee..4f443eb98 100644 --- a/promptsource/templates/liar/templates.yaml +++ b/promptsource/templates/liar/templates.yaml @@ -22,6 +22,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -46,6 +48,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -67,6 +71,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -89,6 +95,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -135,6 +143,8 @@ templates: {{subject.split(",")[0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/limit/templates.yaml b/promptsource/templates/limit/templates.yaml index 37ceebca5..ce0146777 100644 --- a/promptsource/templates/limit/templates.yaml +++ b/promptsource/templates/limit/templates.yaml @@ -22,6 +22,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -50,6 +52,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -79,6 +83,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -92,6 +98,8 @@ templates: \ }}\n{% else %}\n{{ answer_choices[1] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -109,6 +117,8 @@ templates: {{motion_entities | length}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -124,6 +134,8 @@ templates: entity\") | join(\", \")}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -139,6 +151,8 @@ templates: entity\") | join(\", \")}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -168,6 +182,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -185,6 +201,8 @@ templates: {{motion_entities | length}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -213,6 +231,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -242,6 +262,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -258,6 +280,8 @@ templates: \ %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml index 
3c3a6e594..6b6041242 100644 --- a/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml +++ b/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml @@ -13,6 +13,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -31,6 +33,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -50,6 +54,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -71,6 +77,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -92,6 +100,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -110,6 +120,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -127,6 +139,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -142,6 +156,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -158,6 +174,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -173,6 +191,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml index 8463ced2a..ae4bf7bf4 100644 --- a/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml +++ b/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml @@ -15,6 +15,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -32,6 +34,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -51,6 +55,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -67,6 +73,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -83,6 +91,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -103,6 +113,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -124,6 +136,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -145,6 +159,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -162,6 +178,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -177,6 +195,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: 
+ - en metrics: - Other original_task: true diff --git a/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml index 3f0695559..599e87b34 100644 --- a/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml +++ b/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml @@ -13,6 +13,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -28,6 +30,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -48,6 +52,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -69,6 +75,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -87,6 +95,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -108,6 +118,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -126,6 +138,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -143,6 +157,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -158,6 +174,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -173,6 +191,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml index aba920ef5..ab37043cd 100644 --- a/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml +++ b/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml @@ -13,6 +13,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -32,6 +34,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -51,6 +55,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -72,6 +78,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -93,6 +101,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -110,6 +120,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -126,6 +138,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -142,6 +156,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -158,6 +174,8 @@ templates: {{answer}}' metadata: 
!TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -177,6 +195,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/math_qa/templates.yaml b/promptsource/templates/math_qa/templates.yaml index c6de147bb..c1e4f5492 100644 --- a/promptsource/templates/math_qa/templates.yaml +++ b/promptsource/templates/math_qa/templates.yaml @@ -13,6 +13,8 @@ templates: {{correct}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +36,8 @@ templates: {{correct}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +50,8 @@ templates: {{correct}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +64,8 @@ templates: \ choose the right one? \n\n{{options}}\n\nProblem: {{Problem}}\n|||\n{{correct}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -71,6 +79,8 @@ templates: {{Problem}} \n\nCategories:\n{{answer_choices | join(\"\\n\")}}\n|||\n{{category}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -82,6 +92,8 @@ templates: jinja: "Solve this advanced GRE problem: \n{{Problem}}\n\n{{options}}|||\n{{correct}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/mc_taco/templates.yaml b/promptsource/templates/mc_taco/templates.yaml index e64da4e49..e101ac9de 100644 --- a/promptsource/templates/mc_taco/templates.yaml +++ b/promptsource/templates/mc_taco/templates.yaml @@ -21,6 +21,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +36,8 @@ templates: \nA: \"{{answer}}\" \n\nThis answer is definitely not ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -53,6 +57,8 @@ templates: {{answer_choices[category]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -80,6 +86,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -105,6 +113,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -133,6 +143,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -162,6 +174,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -179,6 +193,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -194,6 +210,8 @@ templates: {{answer_choices[category]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -218,6 +236,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en 
metrics: - Accuracy original_task: true diff --git a/promptsource/templates/mdd/task1_qa/templates.yaml b/promptsource/templates/mdd/task1_qa/templates.yaml index c993f32e9..4661dd67f 100644 --- a/promptsource/templates/mdd/task1_qa/templates.yaml +++ b/promptsource/templates/mdd/task1_qa/templates.yaml @@ -8,6 +8,8 @@ templates: \ | capitalize }}{{ dialogue_turns.utterance[0][1:] }} \n|||\n{{dialogue_turns.utterance[1]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -26,6 +28,8 @@ templates: }} ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -39,6 +43,8 @@ templates: \ }} \n|||\n{{dialogue_turns.utterance[1]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -54,6 +60,8 @@ templates: \ }} \n\nSpeaker {{ dialogue_turns.speaker[1] }}:\n|||\n{{dialogue_turns.utterance[1]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -71,6 +79,8 @@ templates: }} ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -84,6 +94,8 @@ templates: \ capitalize }}{{ dialogue_turns.utterance[0][1:] }} \n|||\n{{dialogue_turns.utterance[1]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/mdd/task2_recs/templates.yaml b/promptsource/templates/mdd/task2_recs/templates.yaml index ffd7c3379..a9837c2cf 100644 --- a/promptsource/templates/mdd/task2_recs/templates.yaml +++ b/promptsource/templates/mdd/task2_recs/templates.yaml @@ -11,6 +11,8 @@ templates: \ \n|||\n{{dialogue_turns.utterance[1]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -26,6 +28,8 @@ templates: {{dialogue_turns.utterance[1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -43,6 +47,8 @@ templates: {{dialogue_turns.utterance[1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false diff --git a/promptsource/templates/mdd/task3_qarecs/templates.yaml b/promptsource/templates/mdd/task3_qarecs/templates.yaml index d8f2cf61e..a323105bc 100644 --- a/promptsource/templates/mdd/task3_qarecs/templates.yaml +++ b/promptsource/templates/mdd/task3_qarecs/templates.yaml @@ -29,6 +29,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -47,6 +49,8 @@ templates: {{dialogue_turns.utterance[1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -89,6 +93,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -155,6 +161,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -190,6 +198,8 @@ templates: {{dialogue_turns.utterance[-1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -208,6 +218,8 @@ templates: {{dialogue_turns.utterance[3]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false diff --git 
a/promptsource/templates/medal/templates.yaml b/promptsource/templates/medal/templates.yaml index f735858dc..7ddb431af 100644 --- a/promptsource/templates/medal/templates.yaml +++ b/promptsource/templates/medal/templates.yaml @@ -20,6 +20,8 @@ templates: {{ label[0] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -35,6 +37,8 @@ templates: \ on the PubMed abstract above?\n\n|||\n{{ tokenised_text[acronym] }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -61,6 +65,8 @@ templates: {{ label[0] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -75,6 +81,8 @@ templates: \ for the token: \"{{ label[0] }}\"?\n\n|||\n{{ tokenised_text[acronym] }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -89,6 +97,8 @@ templates: \ ') }} \n\n|||\n{{ tokenised_text[acronym] }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -113,6 +123,8 @@ templates: {{ label[0] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/medical_questions_pairs/templates.yaml b/promptsource/templates/medical_questions_pairs/templates.yaml index fb0fd2b37..99951fe3e 100644 --- a/promptsource/templates/medical_questions_pairs/templates.yaml +++ b/promptsource/templates/medical_questions_pairs/templates.yaml @@ -3,14 +3,29 @@ templates: 18c92f97-0655-4f67-aca1-69f8e4fbb11e: !Template answer_choices: different ||| paraphrase id: 18c92f97-0655-4f67-aca1-69f8e4fbb11e - jinja: "In the context of healthcare questionnaires, it is often necessary to\ - \ find out if two questions are paraphrases of one another. Given the following\ - \ question:\n\nQuestion 1: {{question_1}}\n\nIs the following question a {{answer_choices[1]}} or - {{answer_choices[0]}}?\n\nQuestion 2: {{question_2}}\n\n + jinja: 'In the context of healthcare questionnaires, it is often necessary to + find out if two questions are paraphrases of one another. Given the following + question: - |||\n\n{{answer_choices[label]}}" + + Question 1: {{question_1}} + + + Is the following question a {{answer_choices[1]}} or {{answer_choices[0]}}? 
+ + + Question 2: {{question_2}} + + + + ||| + + + {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -19,12 +34,13 @@ templates: 316f2ff7-45f8-4997-9c5f-dfe5fb7f9808: !Template answer_choices: False ||| True id: 316f2ff7-45f8-4997-9c5f-dfe5fb7f9808 - jinja: "Question 1: {{question_1}}\n\nand\n\nQuestion 2: {{question_2}}\n\n - Is\ - \ it {{answer_choices[1]}} or {{answer_choices[0]}} that the two questions above are paraphrases of each other?\n\ - \n|||\n\n {{answer_choices[label]}} " + jinja: "Question 1: {{question_1}}\n\nand\n\nQuestion 2: {{question_2}}\n\n Is\ + \ it {{answer_choices[1]}} or {{answer_choices[0]}} that the two questions above\ + \ are paraphrases of each other?\n\n|||\n\n {{answer_choices[label]}} " metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -50,6 +66,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,13 +76,13 @@ templates: 681dc0d2-a771-41ae-aa00-d1f59ab01197: !Template answer_choices: not duplicates ||| duplicates id: 681dc0d2-a771-41ae-aa00-d1f59ab01197 - jinja: "Question 1: {{question_1}}\n\nQuestion 2: {{question_2}}\n\n - Pick one of the following options:\n - Questions are {{answer_choices[1]}} or {{answer_choices[0]}}\n\n - ||| - {{ answer_choices[label] }}" + jinja: "Question 1: {{question_1}}\n\nQuestion 2: {{question_2}}\n\n Pick one\ + \ of the following options:\n Questions are {{answer_choices[1]}} or {{answer_choices[0]}}\n\ + \n ||| {{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -74,11 +92,12 @@ templates: answer_choices: No ||| Yes id: 7be2b267-8d5c-466b-9fd4-1fbbae442938 jinja: "Question 1: {{question_1}}\n\nQuestion 2: {{question_2}}\n\nQuestion:\ - \ Is Question 1 asking the same question as Question 2? {{answer_choices[1]}} or {{answer_choices[0]}}?\n\n\n - ||| - {{ answer_choices[label] }}" + \ Is Question 1 asking the same question as Question 2? 
{{answer_choices[1]}}\ + \ or {{answer_choices[0]}}?\n\n\n ||| {{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -88,11 +107,12 @@ templates: answer_choices: False ||| True id: 854ebbe0-8968-4967-a346-4e4d6f98cf73 jinja: "Question 1: {{question_1}}\n\nOne possible way of paraphrasing the same\ - \ question is: \n\nQuestion 2: {{question_2}}\n\n{{answer_choices[1]}} or {{answer_choices[0]}}?\n\n - ||| - {{ answer_choices[label] }}" + \ question is: \n\nQuestion 2: {{question_2}}\n\n{{answer_choices[1]}} or {{answer_choices[0]}}?\n\ + \n ||| {{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -101,19 +121,19 @@ templates: b388913a-9b0d-43a1-8bf9-83319ebf38b2: !Template answer_choices: related question ||| rewrite id: b388913a-9b0d-43a1-8bf9-83319ebf38b2 - jinja: "Original question: {{question_1}} + jinja: 'Original question: {{question_1}} - Given this question, doctors were asked to either: - - Rewrite the question so that it kept the same intent - - Create a related question for which the original answer is unrelated or wrong + Given this question, doctors were asked to either: - Rewrite the question so + that it kept the same intent - Create a related question for which the original + answer is unrelated or wrong - Is the following question a {{answer_choices[1]}} or {{answer_choices[0]}}? + Is the following question a {{answer_choices[1]}} or {{answer_choices[0]}}? - New question: {{question_2}} - ||| - {{answer_choices[label]}}" + New question: {{question_2}} ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,13 +142,12 @@ templates: c8df74ce-0ae7-4e70-9322-aaf9921ae3b1: !Template answer_choices: dissimilar ||| similar id: c8df74ce-0ae7-4e70-9322-aaf9921ae3b1 - jinja: "These two questions are either {{answer_choices[1]}} or {{answer_choices[0]}}. - \n\n{{question_1}} \n\nand\n\n{{question_2}}\n\ - \nWhich is it? - ||| - {{answer_choices[label]}}" + jinja: "These two questions are either {{answer_choices[1]}} or {{answer_choices[0]}}.\ + \ \n\n{{question_1}} \n\nand\n\n{{question_2}}\n\nWhich is it? 
||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/meta_woz/dialogues/templates.yaml b/promptsource/templates/meta_woz/dialogues/templates.yaml index 8f185d041..6dfea4dfa 100644 --- a/promptsource/templates/meta_woz/dialogues/templates.yaml +++ b/promptsource/templates/meta_woz/dialogues/templates.yaml @@ -10,6 +10,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -38,6 +40,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -54,6 +58,8 @@ templates: Fill in the blank \n|||\n{{turns[rand_index.value] }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -69,6 +75,8 @@ templates: \ ' ') | lower }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -83,6 +91,8 @@ templates: \ ' ') | lower }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -106,6 +116,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -130,6 +142,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -154,6 +168,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU diff --git a/promptsource/templates/mocha/templates.yaml b/promptsource/templates/mocha/templates.yaml index ca49a503e..3871dad62 100644 --- a/promptsource/templates/mocha/templates.yaml +++ b/promptsource/templates/mocha/templates.yaml @@ -9,6 +9,8 @@ templates: \n|||\n{{ score }}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -24,6 +26,8 @@ templates: \ %} \n|||\n{{ question }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -40,6 +44,8 @@ templates: \ %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -64,6 +70,8 @@ templates: {{ score }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -89,6 +97,8 @@ templates: {{ score }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -103,6 +113,8 @@ templates: \ \"{{reference}}\" \n|||\n{{ score }}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -122,6 +134,8 @@ templates: {{ reference }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -147,6 +161,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true diff --git a/promptsource/templates/movie_rationales/templates.yaml b/promptsource/templates/movie_rationales/templates.yaml index 36b132dba..dd333c98e 100644 --- a/promptsource/templates/movie_rationales/templates.yaml +++ b/promptsource/templates/movie_rationales/templates.yaml @@ -7,6 +7,8 @@ templates: \ |||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: 
+ - en metrics: - Accuracy original_task: false @@ -25,6 +27,8 @@ templates: ? ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -46,6 +50,8 @@ templates: this review is {{answer_choices[0]}} or {{answer_choices[1]}} . ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -64,6 +70,8 @@ templates: - {{ evidences | join("\n- ") }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -85,6 +93,8 @@ templates: - {{ evidences | join("\n- ") }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/multi_news/templates.yaml b/promptsource/templates/multi_news/templates.yaml index 23fffa212..2cea0f984 100644 --- a/promptsource/templates/multi_news/templates.yaml +++ b/promptsource/templates/multi_news/templates.yaml @@ -6,6 +6,8 @@ templates: jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto", "") | list %} + {% if document != "" %} + What are the key points across these news articles: {% for doc in docs %} @@ -17,9 +19,13 @@ templates: ||| - {{summary[2:]}}' + {{summary[2:]}} + + {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -32,6 +38,8 @@ templates: jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto", "") | list %} + {% if document != "" %} + Synthesize these documents into a single one: {% for doc in docs %} @@ -43,9 +51,13 @@ templates: ||| - {{summary[2:]}}' + {{summary[2:]}} + + {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -58,6 +70,8 @@ templates: jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto", "") | list %} + {% if document != "" %} + I want to edit the following articles into a more concise summary: {% for doc in docs %} @@ -69,9 +83,13 @@ templates: ||| - {{summary[2:]}}' + {{summary[2:]}} + + {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -84,6 +102,8 @@ templates: jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto", "") | list %} + {% if document != "" %} + Write a summary of the following articles: {% for doc in docs %} @@ -95,9 +115,13 @@ templates: ||| - {{summary[2:]}}' + {{summary[2:]}} + + {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -110,15 +134,21 @@ templates: jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto", "") | list%} + {% if document != "" %} + Write an expanded news article with plausible details from the following summary: {{summary[2:]}} ||| - {{docs | choice}}' + {{docs | choice}} + + {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -131,6 +161,8 @@ templates: jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto", "") | list %} + {% if document != "" %} + I''m trying to distill these articles down into one: {% for doc in docs %} @@ -142,9 +174,13 @@ templates: ||| - {{summary[2:]}}' + {{summary[2:]}} + + {% endif %}' metadata: !TemplateMetadata 
choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/multi_nli/templates.yaml b/promptsource/templates/multi_nli/templates.yaml index 743742ad5..e6de4e00b 100644 --- a/promptsource/templates/multi_nli/templates.yaml +++ b/promptsource/templates/multi_nli/templates.yaml @@ -1,5 +1,4 @@ dataset: multi_nli -subset: None templates: 001bd025-1fcb-4c4b-b5dd-d8bb83f82d13: !Template answer_choices: True ||| Inconclusive ||| False @@ -9,6 +8,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -21,6 +22,8 @@ templates: \ no, or maybe? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +36,8 @@ templates: no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -45,6 +50,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +65,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -70,6 +79,8 @@ templates: {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -83,6 +94,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -96,6 +109,8 @@ templates: \ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +125,8 @@ templates: {{"inconclusive"}}? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -124,6 +141,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +156,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -149,6 +170,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -161,6 +184,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -173,6 +198,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -185,6 +212,8 @@ templates: or maybe? 
||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/multi_x_science_sum/templates.yaml b/promptsource/templates/multi_x_science_sum/templates.yaml index 246787b7a..ad69ad161 100644 --- a/promptsource/templates/multi_x_science_sum/templates.yaml +++ b/promptsource/templates/multi_x_science_sum/templates.yaml @@ -19,6 +19,8 @@ templates: {{related_work}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -35,6 +37,8 @@ templates: {{related_work}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -61,6 +65,8 @@ templates: {{related_work}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -79,6 +85,8 @@ templates: {{abstract}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: false @@ -103,6 +111,8 @@ templates: {{related_work}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -127,6 +137,8 @@ templates: {{related_work}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -147,6 +159,8 @@ templates: {{abstract}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: false diff --git a/promptsource/templates/mwsc/templates.yaml b/promptsource/templates/mwsc/templates.yaml index a4a710712..5dfc0a52a 100644 --- a/promptsource/templates/mwsc/templates.yaml +++ b/promptsource/templates/mwsc/templates.yaml @@ -7,6 +7,8 @@ templates: ||| {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: {{ answer }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: }} ||| {{ answer }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -40,10 +46,12 @@ templates: ad4b74f6-6b2f-40a8-8189-4ada58d64fd4: !Template answer_choices: '{{options | join(" ||| ")}}' id: ad4b74f6-6b2f-40a8-8189-4ada58d64fd4 - jinja: "{{sentence}} {{ question }} Was it \"{{options|join('\" or \"')}}\"\ - ? ||| {{ answer }}" + jinja: '{{sentence}} {{ question }} Was it "{{options|join(''" or "'')}}"? 
||| + {{ answer }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -57,6 +65,8 @@ templates: {% else %} {{answer_choices[1]}} {% endif %} ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/narrativeqa/templates.yaml b/promptsource/templates/narrativeqa/templates.yaml index cb9a26d3a..f20511d41 100644 --- a/promptsource/templates/narrativeqa/templates.yaml +++ b/promptsource/templates/narrativeqa/templates.yaml @@ -17,6 +17,8 @@ templates: {{answers | map(attribute="text") | list | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -37,6 +39,8 @@ templates: {{answers | map(attribute=''text'') | list | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -54,6 +58,8 @@ templates: {{ document.summary.text }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -68,6 +74,8 @@ templates: ) | list | choice }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -88,6 +96,8 @@ templates: {{answers | map(attribute="text") | list | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -111,6 +121,8 @@ templates: {{answers | map(attribute="text") | list | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -128,6 +140,8 @@ templates: {{ document.summary.text }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -141,6 +155,8 @@ templates: \ }}\n |||\n{{ document.summary.text }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU diff --git a/promptsource/templates/ncbi_disease/templates.yaml b/promptsource/templates/ncbi_disease/templates.yaml index b0c56cf04..6318a4842 100644 --- a/promptsource/templates/ncbi_disease/templates.yaml +++ b/promptsource/templates/ncbi_disease/templates.yaml @@ -40,6 +40,8 @@ templates: {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -49,7 +51,8 @@ templates: 4e96f535-07d4-4c71-8816-3c1cb1900090: !Template answer_choices: null id: 4e96f535-07d4-4c71-8816-3c1cb1900090 - jinja: 'Identify the names of diseases mentioned in the following text (if no diseases are mentioned, output {{"None"}}): + jinja: 'Identify the names of diseases mentioned in the following text (if no + diseases are mentioned, output {{"None"}}): {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace(" ;", ";") | replace(" :", ":") | replace(" -", "-") }} @@ -86,6 +89,8 @@ templates: {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -117,6 +122,8 @@ templates: {{answer_choices[0] if vars.no_disease else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -162,6 +169,8 @@ templates: {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - 
en metrics: - Other original_task: true @@ -170,7 +179,8 @@ templates: ecd88889-84fa-4e28-ac0f-3bc1564e838b: !Template answer_choices: null id: ecd88889-84fa-4e28-ac0f-3bc1564e838b - jinja: 'List the diseases mentioned in the following text (write {{"None"}} if no disease is mentioned): + jinja: 'List the diseases mentioned in the following text (write {{"None"}} if + no disease is mentioned): {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace(" ;", ";") | replace(" :", ":") | replace(" -", "-") }} @@ -207,6 +217,8 @@ templates: {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -252,6 +264,8 @@ templates: {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -283,6 +297,8 @@ templates: {{answer_choices[0] if vars.no_disease else answer_choices[1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml b/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml index a0db9a972..10293c2af 100644 --- a/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml +++ b/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml @@ -18,6 +18,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -39,6 +41,8 @@ templates: {{ question }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/newspop/templates.yaml b/promptsource/templates/newspop/templates.yaml index 6183c50a9..d2c240dc0 100644 --- a/promptsource/templates/newspop/templates.yaml +++ b/promptsource/templates/newspop/templates.yaml @@ -8,6 +8,8 @@ templates: \n{{topic|capitalize}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: \n|||\n\n{{topic|capitalize}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: |||\n\n{{topic|capitalize}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -43,6 +49,8 @@ templates: jinja: "{{title}}\n{{headline}}\n\nTopic: \n\n|||\n\n{{topic|capitalize}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -55,6 +63,8 @@ templates: \ {{answer_choices[2]}} or {{answer_choices[3]}}?\n|||\n\n{{topic|capitalize}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -75,6 +85,8 @@ templates: {{topic|capitalize}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -87,6 +99,8 @@ templates: |||\n\n{{title}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -101,6 +115,8 @@ templates: \ {{headline}}\n\n|||\n\n{{topic|capitalize}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git 
a/promptsource/templates/nlu_evaluation_data/templates.yaml b/promptsource/templates/nlu_evaluation_data/templates.yaml index 00e73b887..f99cc205a 100644 --- a/promptsource/templates/nlu_evaluation_data/templates.yaml +++ b/promptsource/templates/nlu_evaluation_data/templates.yaml @@ -33,6 +33,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -55,6 +57,8 @@ templates: is actually in answer_choices #}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -91,6 +95,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -103,6 +109,8 @@ templates: {{scenario}}\n|||\n{{text}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: false @@ -134,6 +142,8 @@ templates: \ | join(', ') }} or {{answer_choices[-1]}}.\n{{text}}\n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -170,6 +180,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -190,6 +202,8 @@ templates: is actually in answer_choices #}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/nq_open/templates.yaml b/promptsource/templates/nq_open/templates.yaml index 2caf9bfef..9b82c4644 100644 --- a/promptsource/templates/nq_open/templates.yaml +++ b/promptsource/templates/nq_open/templates.yaml @@ -15,6 +15,8 @@ templates: {{answer|choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -34,6 +36,8 @@ templates: {{answer | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -50,6 +54,8 @@ templates: {{question}}?' 
metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -66,6 +72,8 @@ templates: {{answer|choice}} ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -84,6 +92,8 @@ templates: {{answer|choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -101,6 +111,8 @@ templates: {{answer|choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other diff --git a/promptsource/templates/numer_sense/templates.yaml b/promptsource/templates/numer_sense/templates.yaml index cc9515542..1e68ad993 100644 --- a/promptsource/templates/numer_sense/templates.yaml +++ b/promptsource/templates/numer_sense/templates.yaml @@ -9,6 +9,8 @@ templates: \ options:\n\n{{ ', '.join(answer_choices) }}\n\n||| \n\n{{target}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +35,8 @@ templates: {{target}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +62,8 @@ templates: {{target}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -81,6 +87,8 @@ templates: {{target}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -95,6 +103,8 @@ templates: \ '.join(answer_choices) }}\n\n||| \n\n{{target}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/onestop_english/templates.yaml b/promptsource/templates/onestop_english/templates.yaml index 4ade30f88..a17c13da6 100644 --- a/promptsource/templates/onestop_english/templates.yaml +++ b/promptsource/templates/onestop_english/templates.yaml @@ -17,6 +17,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -38,6 +40,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -63,6 +67,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -87,6 +93,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +116,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,6 +132,8 @@ templates: }} level?\n\n|||\n\n{{ answer_choices [label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/openai_humaneval/templates.yaml b/promptsource/templates/openai_humaneval/templates.yaml index 99e7bf71a..08e9e6d0b 100644 --- a/promptsource/templates/openai_humaneval/templates.yaml +++ b/promptsource/templates/openai_humaneval/templates.yaml @@ -12,6 +12,8 @@ templates: {{ canonical_solution }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -32,6 +34,8 @@ templates: \ %}\n" metadata: !TemplateMetadata choices_in_prompt: false + 
languages: + - en metrics: - Other original_task: false diff --git a/promptsource/templates/openbookqa/additional/templates.yaml b/promptsource/templates/openbookqa/additional/templates.yaml index cf7b1c29d..a6bbda464 100644 --- a/promptsource/templates/openbookqa/additional/templates.yaml +++ b/promptsource/templates/openbookqa/additional/templates.yaml @@ -16,6 +16,8 @@ templates: {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -36,6 +38,8 @@ templates: {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +63,8 @@ templates: {{answerKey}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +85,8 @@ templates: {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -99,6 +107,8 @@ templates: {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -116,6 +126,8 @@ templates: {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -136,6 +148,8 @@ templates: {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/openbookqa/main/templates.yaml b/promptsource/templates/openbookqa/main/templates.yaml index e3c334d09..7b5a5cb17 100644 --- a/promptsource/templates/openbookqa/main/templates.yaml +++ b/promptsource/templates/openbookqa/main/templates.yaml @@ -16,6 +16,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -36,6 +38,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +63,8 @@ templates: {{answerKey}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +85,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -96,6 +104,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -116,6 +126,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -136,6 +148,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/paws-x/en/templates.yaml b/promptsource/templates/paws-x/en/templates.yaml index 102c1a4d1..701870b9e 100644 --- a/promptsource/templates/paws-x/en/templates.yaml +++ 
b/promptsource/templates/paws-x/en/templates.yaml @@ -8,6 +8,8 @@ templates: Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: ||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +50,8 @@ templates: \ rewrite Sentence 1 to Sentence 2? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -56,6 +64,8 @@ templates: Yes or No.\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -68,6 +78,8 @@ templates: \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +92,8 @@ templates: {{sentence2}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -93,6 +107,8 @@ templates: \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +121,8 @@ templates: \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -116,6 +134,8 @@ templates: jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -128,6 +148,8 @@ templates: \ rewrite Sentence 1 to Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -139,6 +161,8 @@ templates: jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/paws/labeled_final/templates.yaml b/promptsource/templates/paws/labeled_final/templates.yaml index f1fb16b4e..86a0e7a8c 100644 --- a/promptsource/templates/paws/labeled_final/templates.yaml +++ b/promptsource/templates/paws/labeled_final/templates.yaml @@ -8,6 +8,8 @@ templates: Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: \ 1 and Sentence 2 express the same meaning? Yes or No? 
\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: ||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +50,8 @@ templates: \ rewrite Sentence 1 to Sentence 2? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -56,6 +64,8 @@ templates: Yes or No.\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -68,6 +78,8 @@ templates: \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +92,8 @@ templates: {{sentence2}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -93,6 +107,8 @@ templates: \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +121,8 @@ templates: \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -116,6 +134,8 @@ templates: jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -128,6 +148,8 @@ templates: \ rewrite Sentence 1 to Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -139,6 +161,8 @@ templates: jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/paws/labeled_swap/templates.yaml b/promptsource/templates/paws/labeled_swap/templates.yaml index 1f9dd5df0..802ee2591 100644 --- a/promptsource/templates/paws/labeled_swap/templates.yaml +++ b/promptsource/templates/paws/labeled_swap/templates.yaml @@ -8,6 +8,8 @@ templates: Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: ||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +50,8 @@ templates: \ rewrite Sentence 1 to Sentence 2? 
\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -56,6 +64,8 @@ templates: Yes or No.\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -68,6 +78,8 @@ templates: \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +92,8 @@ templates: {{sentence2}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -93,6 +107,8 @@ templates: \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +121,8 @@ templates: \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -116,6 +134,8 @@ templates: jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -128,6 +148,8 @@ templates: \ rewrite Sentence 1 to Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -139,6 +161,8 @@ templates: jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/paws/unlabeled_final/templates.yaml b/promptsource/templates/paws/unlabeled_final/templates.yaml index 53c645306..b1110b235 100644 --- a/promptsource/templates/paws/unlabeled_final/templates.yaml +++ b/promptsource/templates/paws/unlabeled_final/templates.yaml @@ -8,6 +8,8 @@ templates: Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: ||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +50,8 @@ templates: \ rewrite Sentence 1 to Sentence 2? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -56,6 +64,8 @@ templates: Yes or No.\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -68,6 +78,8 @@ templates: \ 1 paraphrase Sentence 2? Yes or No? 
\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -80,6 +92,8 @@ templates: {{sentence2}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -93,6 +107,8 @@ templates: \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +121,8 @@ templates: \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -116,6 +134,8 @@ templates: jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -128,6 +148,8 @@ templates: \ rewrite Sentence 1 to Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -139,6 +161,8 @@ templates: jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/piqa/templates.yaml b/promptsource/templates/piqa/templates.yaml index 82d6eee30..b9a607643 100644 --- a/promptsource/templates/piqa/templates.yaml +++ b/promptsource/templates/piqa/templates.yaml @@ -20,6 +20,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +48,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -74,6 +78,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -87,6 +93,8 @@ templates: |||\n{{[sol1, sol2][label]}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -114,6 +122,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -131,6 +141,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -155,6 +167,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -177,6 +191,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -193,6 +209,8 @@ templates: {{[sol1[0].lower() + sol1[1:], sol2[0].lower() + sol2[1:]][label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -213,6 +231,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -235,6 +255,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/poem_sentiment/templates.yaml 
b/promptsource/templates/poem_sentiment/templates.yaml index dc27b2c20..ae3da7cd7 100644 --- a/promptsource/templates/poem_sentiment/templates.yaml +++ b/promptsource/templates/poem_sentiment/templates.yaml @@ -8,6 +8,8 @@ templates: answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: [label] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +52,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +67,8 @@ templates: answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -72,6 +82,8 @@ templates: \ ||| {{ answer_choices [label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -84,6 +96,8 @@ templates: or {{"mixed"}} sentiment? {{verse_text}} ||| {{ answer_choices [label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -96,6 +110,8 @@ templates: feel through the verse mentioned above?||| {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml b/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml index 38331d8a4..37f10efc3 100644 --- a/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml +++ b/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml @@ -9,6 +9,8 @@ templates: \ answered by the above research abstract? \n|||\n{{question}} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -22,6 +24,8 @@ templates: \ this question: \"{{question}}\" \n||| \n{{long_answer}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -36,6 +40,8 @@ templates: \ is the question answered by the above research abstract? \n|||\n{{question}} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -43,12 +49,14 @@ templates: name: 'Generate Question Title ' reference: Given abstract, generate title (which is in the form of a question) 1e0a77f8-0eb4-40a1-814d-8a111df66e5e: !Template - answer_choices: 'Yes ||| No ||| Maybe' + answer_choices: Yes ||| No ||| Maybe id: 1e0a77f8-0eb4-40a1-814d-8a111df66e5e jinja: "Question: \"{{ question }}\" \n\nAnswer: \"{{ long_answer }}\"\n\nSummarize\ \ the above answer as YES, NO, or MAYBE? \n|||\n{{final_decision}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -65,6 +73,8 @@ templates: {{ context.labels[choice] }}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -74,9 +84,12 @@ templates: answer_choices: null id: 45cb344c-bb36-492a-ace0-7cfc897e127a jinja: "Given a PubMed abstract:{{ context.contexts | join(\", \") }}\n\nWhat\ - \ are the MeSH (Medical Subject Headings) terms for this? 
\n|||\n{{ context.meshes | join(\", \") }}" + \ are the MeSH (Medical Subject Headings) terms for this? \n|||\n{{ context.meshes\ + \ | join(\", \") }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -88,10 +101,12 @@ templates: answer_choices: null id: 48ee58bb-6a4a-4667-9d9c-69618408c6ce jinja: "Given a research abstract: {{ context.contexts | join(\", \") }}\nAnd\ - \ given long answer of a question: {{long_answer}}.\n \nWhat is the question asked by this research paper?\ - \ \n|||\n{{question}} " + \ given long answer of a question: {{long_answer}}.\n \nWhat is the question\ + \ asked by this research paper? \n|||\n{{question}} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -99,13 +114,15 @@ templates: name: Generate Question Title with long answer reference: '' 91d481e5-fac6-4532-b013-5ac1235b6e1a: !Template - answer_choices: 'Yes ||| No ||| Maybe' + answer_choices: Yes ||| No ||| Maybe id: 91d481e5-fac6-4532-b013-5ac1235b6e1a jinja: "Given a PubMed abstract: {{ context.contexts | join(\", \") }}\n\nAnswer\ \ the question: \"{{question}}\" as YES, NO, MAYBE.\n||| \n{{final_decision}}\n\ \n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/qa_srl/templates.yaml b/promptsource/templates/qa_srl/templates.yaml index 76e762009..73bedda45 100644 --- a/promptsource/templates/qa_srl/templates.yaml +++ b/promptsource/templates/qa_srl/templates.yaml @@ -15,6 +15,8 @@ templates: {{question | join(" ") | replace("_ ", "")}} ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -33,6 +35,8 @@ templates: {{answers | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -50,6 +54,8 @@ templates: {{predicate}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -65,6 +71,8 @@ templates: {{answers | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -83,6 +91,8 @@ templates: {{answers | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -100,6 +110,8 @@ templates: {{answers | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -117,6 +129,8 @@ templates: {{answers | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/qa_zre/templates.yaml b/promptsource/templates/qa_zre/templates.yaml index bac8d26a1..2a2c2d8f2 100644 --- a/promptsource/templates/qa_zre/templates.yaml +++ b/promptsource/templates/qa_zre/templates.yaml @@ -3,13 +3,16 @@ templates: 2d6b6ec6-4cba-4a07-a0d1-f6b7cb103281: !Template answer_choices: null id: 2d6b6ec6-4cba-4a07-a0d1-f6b7cb103281 - jinja: 'The following question is asking about a specific relation. What is this relation? + jinja: 'The following question is asking about a specific relation. What is this + relation? 
Question: {{question}} ||| {{relation}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -19,9 +22,9 @@ templates: answer_choices: null id: 5a970b88-53a0-4148-b45e-7ac410df263f jinja: 'Based on the context below, please answer the question: "{{question.replace("XXX",subject)}}". - If the context is not sufficient to answer, please write "unanswerable" instead. + If the context is not sufficient to answer, please write "unanswerable" instead. - Context: {{context}} + Context: {{context}} ||| @@ -33,10 +36,11 @@ templates: unanswerable - {% endif %} - ' + {% endif %} ' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: true @@ -55,6 +59,8 @@ templates: {{subject}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -71,8 +77,10 @@ templates: {{relation}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - - Accuracy + - Accuracy original_task: false name: relation2 reference: '' @@ -81,8 +89,8 @@ templates: id: b2195890-a3c5-4e33-be4a-5e53af75e6dd jinja: ' - You will find below a context and a question. Please answer the question or write "unanswerable" if the question - cannot be answered using the context. + You will find below a context and a question. Please answer the question or + write "unanswerable" if the question cannot be answered using the context. Context: {{context}} @@ -97,9 +105,10 @@ templates: unanswerable {% endif %} ' - metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -108,13 +117,12 @@ templates: b2195890-a3c5-4e33-be4a-5e53af75e7dd: !Template answer_choices: null id: b2195890-a3c5-4e33-be4a-5e53af75e7dd - jinja: ' - Question: {{question.replace("XXX",subject)}} + jinja: ' Question: {{question.replace("XXX",subject)}} Context: {{context}} - Please answer the question above using a passage present in the context. If no passage is a good answer for the - question, please write "unanswerable" instead. + Please answer the question above using a passage present in the context. If + no passage is a good answer for the question, please write "unanswerable" instead. ||| @@ -127,9 +135,10 @@ templates: unanswerable {% endif %} ' - metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -138,13 +147,12 @@ templates: b2195890-a3c5-4e33-be4a-5e53af75e8dd: !Template answer_choices: null id: b2195890-a3c5-4e33-be4a-5e53af75e8dd - jinja: ' - Question: {{question.replace("XXX",subject)}} + jinja: ' Question: {{question.replace("XXX",subject)}} Context: {{context}} - Please copy the span in the context that best answers the question. If there is no such span, please output - "unanswerable" instead. + Please copy the span in the context that best answers the question. If there + is no such span, please output "unanswerable" instead. ||| @@ -157,9 +165,10 @@ templates: unanswerable {% endif %} ' - metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -168,13 +177,13 @@ templates: b2195890-a3c5-4e33-be4a-5e53af75e9dd: !Template answer_choices: null id: b2195890-a3c5-4e33-be4a-5e53af75e9dd - jinja: ' - Question: {{question.replace("XXX",subject)}} + jinja: ' Question: {{question.replace("XXX",subject)}} Context: {{context}} - The following context may contain an answer to the question. 
If it does, please copy the span that best - answers it. If it does not, mention that the question is "unanswerable" using the context. + The following context may contain an answer to the question. If it does, please + copy the span that best answers it. If it does not, mention that the question + is "unanswerable" using the context. ||| @@ -187,9 +196,10 @@ templates: unanswerable {% endif %} ' - metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/qasc/templates.yaml b/promptsource/templates/qasc/templates.yaml index ce395f3a8..5d05a2235 100644 --- a/promptsource/templates/qasc/templates.yaml +++ b/promptsource/templates/qasc/templates.yaml @@ -10,6 +10,8 @@ templates: \ {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -25,6 +27,8 @@ templates: \ == answerKey %}{{ answer_choices[loop.index - 1] }}{% endif %}{% endfor %} " metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -40,6 +44,8 @@ templates: \ - 1] }}{% endif %}{% endfor %} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -56,6 +62,8 @@ templates: \ - 1] }}{% endif %}{% endfor %} " metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -72,6 +80,8 @@ templates: \ %}{{ answer_choices[loop.index - 1] }}{% endif %}{% endfor %} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -87,6 +97,8 @@ templates: \ %}{% endfor %} " metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -102,6 +114,8 @@ templates: \ %} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -118,6 +132,8 @@ templates: \ %}{% endfor %} " metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/qed/templates.yaml b/promptsource/templates/qed/templates.yaml index e77abadca..a9fab8e4f 100644 --- a/promptsource/templates/qed/templates.yaml +++ b/promptsource/templates/qed/templates.yaml @@ -7,6 +7,8 @@ templates: \ \n|||\n\n{{title_text}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -25,6 +27,8 @@ templates: {{ chosen["string"]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -39,6 +43,8 @@ templates: \nHint: {{paragraph_text}} \n\n|||\n{{ chosen[\"string\"]}}\n{% endif %}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -58,6 +64,8 @@ templates: {{ title_text }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -70,6 +78,8 @@ templates: \ and ask me a reasonable question? \n\n{{paragraph_text}} \n||| \n\n{{question}}?" 
metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -100,6 +110,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/quac/templates.yaml b/promptsource/templates/quac/templates.yaml index de8a977fc..d19e1a210 100644 --- a/promptsource/templates/quac/templates.yaml +++ b/promptsource/templates/quac/templates.yaml @@ -10,6 +10,8 @@ templates: ) }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -25,6 +27,8 @@ templates: \ choice).replace(\"CANNOTANSWER\",\"Cannot answer\") }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -53,6 +57,8 @@ templates: {{(answers.texts | last | choice).replace("CANNOTANSWER","Cannot answer") }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -69,6 +75,8 @@ templates: ) }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -85,6 +93,8 @@ templates: ,\"Cannot answer\") }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -119,6 +129,8 @@ templates: {{(answers.texts | last | choice).replace("CANNOTANSWER","Cannot answer") }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/quail/templates.yaml b/promptsource/templates/quail/templates.yaml index c7dadcd92..79aef8c57 100644 --- a/promptsource/templates/quail/templates.yaml +++ b/promptsource/templates/quail/templates.yaml @@ -24,6 +24,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -49,6 +51,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +81,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -98,6 +104,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -119,6 +127,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -138,6 +148,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -163,6 +175,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -186,6 +200,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -214,6 +230,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -235,6 +253,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata 
choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -254,6 +274,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -278,6 +300,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -302,6 +326,8 @@ templates: {{ answer_choices[correct_answer_id] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/quarel/templates.yaml b/promptsource/templates/quarel/templates.yaml index a5dd0e5aa..521693bd4 100644 --- a/promptsource/templates/quarel/templates.yaml +++ b/promptsource/templates/quarel/templates.yaml @@ -14,6 +14,8 @@ templates: {{answer_choices[answer_index]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -32,6 +34,8 @@ templates: {{answer_choices[answer_index]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -50,6 +54,8 @@ templates: {{answer_choices[answer_index]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -67,6 +73,8 @@ templates: {{answer_choices[answer_index]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -86,6 +94,8 @@ templates: {{answer_choices[answer_index]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/quartz/templates.yaml b/promptsource/templates/quartz/templates.yaml index 8dad7f829..5e9ea1b44 100755 --- a/promptsource/templates/quartz/templates.yaml +++ b/promptsource/templates/quartz/templates.yaml @@ -10,6 +10,8 @@ templates: \ %}\n\nParagraph :\n\n{{ para }}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -24,6 +26,8 @@ templates: \ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -39,6 +43,8 @@ templates: \ join(\" or \") }}{{ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -54,6 +60,8 @@ templates: \n{{ para }}|||\n{{answer_choices[choices.label.index(answerKey)]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -69,6 +77,8 @@ templates: {{answer_choices[choices.label.index(answerKey)]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -85,6 +95,8 @@ templates: \ %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -101,6 +113,8 @@ templates: \ %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -115,6 +129,8 @@ templates: \ }} {{ 
answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/quora/templates.yaml b/promptsource/templates/quora/templates.yaml index ab57bef56..4344878d1 100644 --- a/promptsource/templates/quora/templates.yaml +++ b/promptsource/templates/quora/templates.yaml @@ -7,6 +7,8 @@ templates: as a duplicate?||| {{ answer_choices [is_duplicate] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -19,6 +21,8 @@ templates: ||| {{ answer_choices [is_duplicate] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +37,8 @@ templates: {% endif %}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -46,6 +52,8 @@ templates: {{"True"}} or {{"false"}}? ||| {{ answer_choices [is_duplicate] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +66,8 @@ templates: {{"True"}} or {{"false"}} ? ||| {{ answer_choices [is_duplicate] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -70,6 +80,8 @@ templates: I feel like they have asked the same question. Am I correct? ||| {{answer_choices[is_duplicate]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/quoref/templates.yaml b/promptsource/templates/quoref/templates.yaml index 3054b3744..44586d480 100644 --- a/promptsource/templates/quoref/templates.yaml +++ b/promptsource/templates/quoref/templates.yaml @@ -14,6 +14,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -36,6 +38,8 @@ templates: {{answers.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -48,6 +52,8 @@ templates: \ , can you please find it? 
\n\n{{context}}|||\n{{answers.text | choice}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -64,6 +70,8 @@ templates: {{answers.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -83,6 +91,8 @@ templates: {{answers.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -102,6 +112,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -119,6 +131,8 @@ templates: {{answers.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -138,6 +152,8 @@ templates: {{title}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -157,6 +173,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -176,6 +194,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -193,6 +213,8 @@ templates: {{answers.text | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/race/all/templates.yaml b/promptsource/templates/race/all/templates.yaml index 5bd0eba24..028420ab6 100644 --- a/promptsource/templates/race/all/templates.yaml +++ b/promptsource/templates/race/all/templates.yaml @@ -24,6 +24,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -38,6 +40,8 @@ templates: {{answer}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -60,6 +64,8 @@ templates: {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -81,6 +87,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -111,6 +119,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -126,6 +136,8 @@ templates: B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -154,6 +166,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -179,6 +193,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/race/high/templates.yaml b/promptsource/templates/race/high/templates.yaml index a0157b50b..28c73e13a 100644 --- a/promptsource/templates/race/high/templates.yaml +++ b/promptsource/templates/race/high/templates.yaml @@ -26,6 +26,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -40,6 +42,8 @@ templates: {{answer}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -69,6 +73,8 @@ 
templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -96,6 +102,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -111,6 +119,8 @@ templates: B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -133,6 +143,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -158,6 +170,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -179,6 +193,8 @@ templates: {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/race/middle/templates.yaml b/promptsource/templates/race/middle/templates.yaml index 2e34756b4..311f506ba 100644 --- a/promptsource/templates/race/middle/templates.yaml +++ b/promptsource/templates/race/middle/templates.yaml @@ -23,6 +23,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +46,8 @@ templates: {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -74,6 +78,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -99,6 +105,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -120,6 +128,8 @@ templates: {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -135,6 +145,8 @@ templates: B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -150,6 +162,8 @@ templates: {{answer}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -179,6 +193,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/riddle_sense/templates.yaml b/promptsource/templates/riddle_sense/templates.yaml index 773dccb69..48fea7b21 100644 --- a/promptsource/templates/riddle_sense/templates.yaml +++ b/promptsource/templates/riddle_sense/templates.yaml @@ -15,6 +15,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -40,6 +42,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +63,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -89,6 +95,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +117,8 @@ templates: {% 
endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/ropes/templates.yaml b/promptsource/templates/ropes/templates.yaml index 22e42c437..93e82240b 100644 --- a/promptsource/templates/ropes/templates.yaml +++ b/promptsource/templates/ropes/templates.yaml @@ -8,6 +8,8 @@ templates: Hint: {{ background }}\n|||\n{{ answers.text | choice }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -21,6 +23,8 @@ templates: \ | choice }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -47,6 +51,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -70,6 +76,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -93,6 +101,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -118,6 +128,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -143,6 +155,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -166,6 +180,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -189,6 +205,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -212,6 +230,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -225,6 +245,8 @@ templates: \ }}\n|||\n{{ answers.text | choice }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -249,6 +271,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/rotten_tomatoes/templates.yaml b/promptsource/templates/rotten_tomatoes/templates.yaml index 7197e1c7b..cb5fdd5a3 100644 --- a/promptsource/templates/rotten_tomatoes/templates.yaml +++ b/promptsource/templates/rotten_tomatoes/templates.yaml @@ -7,6 +7,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -19,6 +21,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +35,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -43,6 +49,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -55,6 +63,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +77,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +91,8 @@ templates: answer_choices [label] }}' 
metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +105,8 @@ templates: [label] }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -103,6 +119,8 @@ templates: ||| {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -115,6 +133,8 @@ templates: [label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/samsum/templates.yaml b/promptsource/templates/samsum/templates.yaml index 7d92f163b..bd7699c56 100644 --- a/promptsource/templates/samsum/templates.yaml +++ b/promptsource/templates/samsum/templates.yaml @@ -8,6 +8,8 @@ templates: {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -23,6 +25,8 @@ templates: {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -36,6 +40,8 @@ templates: {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -51,6 +57,8 @@ templates: |||{{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -66,6 +74,8 @@ templates: |||{{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true @@ -79,6 +89,8 @@ templates: {{dialogue}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: false @@ -90,6 +102,8 @@ templates: jinja: "Sum up the following dialogue: \n{{dialogue}}\n|||{{summary}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE original_task: true diff --git a/promptsource/templates/scan/addprim_jump/templates.yaml b/promptsource/templates/scan/addprim_jump/templates.yaml index cd712710d..0e2bb7eaf 100644 --- a/promptsource/templates/scan/addprim_jump/templates.yaml +++ b/promptsource/templates/scan/addprim_jump/templates.yaml @@ -13,6 +13,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -28,6 +30,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -45,6 +49,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -62,6 +68,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -79,6 +87,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -95,6 +105,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -111,6 +123,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +140,8 @@ templates: \ commands 
}}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -142,6 +158,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -158,6 +176,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -174,6 +194,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scan/addprim_turn_left/templates.yaml b/promptsource/templates/scan/addprim_turn_left/templates.yaml index 9e91aa991..f2c14a0e5 100644 --- a/promptsource/templates/scan/addprim_turn_left/templates.yaml +++ b/promptsource/templates/scan/addprim_turn_left/templates.yaml @@ -12,6 +12,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -27,6 +29,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +48,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -60,6 +66,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -77,6 +85,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -93,6 +103,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +121,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -124,6 +138,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -141,6 +157,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -157,6 +175,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -173,6 +193,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/scan/filler_num0/templates.yaml b/promptsource/templates/scan/filler_num0/templates.yaml index 65c08c8b8..9682c558b 100644 --- a/promptsource/templates/scan/filler_num0/templates.yaml +++ b/promptsource/templates/scan/filler_num0/templates.yaml @@ -12,6 +12,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -29,6 +31,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - 
Accuracy original_task: true @@ -45,6 +49,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +67,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +85,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -93,6 +103,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +120,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -123,6 +137,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -141,6 +157,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -157,6 +175,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -174,6 +194,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scan/filler_num1/templates.yaml b/promptsource/templates/scan/filler_num1/templates.yaml index 3344a5821..392892dbc 100644 --- a/promptsource/templates/scan/filler_num1/templates.yaml +++ b/promptsource/templates/scan/filler_num1/templates.yaml @@ -12,6 +12,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -28,6 +30,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -44,6 +48,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -60,6 +66,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -76,6 +84,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -92,6 +102,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +120,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -124,6 +138,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -141,6 +157,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ 
-156,6 +174,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -174,6 +194,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scan/filler_num2/templates.yaml b/promptsource/templates/scan/filler_num2/templates.yaml index 153e7cf70..0546af0c9 100644 --- a/promptsource/templates/scan/filler_num2/templates.yaml +++ b/promptsource/templates/scan/filler_num2/templates.yaml @@ -12,6 +12,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -29,6 +31,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -45,6 +49,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +67,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +85,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -93,6 +103,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -110,6 +122,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -125,6 +139,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -141,6 +157,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -157,6 +175,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -173,6 +193,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/scan/filler_num3/templates.yaml b/promptsource/templates/scan/filler_num3/templates.yaml index b4f121bbb..eb1c12170 100644 --- a/promptsource/templates/scan/filler_num3/templates.yaml +++ b/promptsource/templates/scan/filler_num3/templates.yaml @@ -12,6 +12,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -29,6 +31,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +48,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -61,6 +67,8 @@ templates: \ commands 
}}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +85,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -93,6 +103,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -109,6 +121,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -125,6 +139,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -141,6 +157,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -157,6 +175,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -174,6 +194,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scan/length/templates.yaml b/promptsource/templates/scan/length/templates.yaml index 131fe34d9..743dd7a5a 100644 --- a/promptsource/templates/scan/length/templates.yaml +++ b/promptsource/templates/scan/length/templates.yaml @@ -11,6 +11,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -27,6 +29,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -45,6 +49,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +67,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +85,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -93,6 +103,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +121,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -125,6 +139,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -141,6 +157,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -158,6 +176,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -174,6 +194,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + 
- en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scan/simple/templates.yaml b/promptsource/templates/scan/simple/templates.yaml index a5fc9d9e7..8daf67d4e 100644 --- a/promptsource/templates/scan/simple/templates.yaml +++ b/promptsource/templates/scan/simple/templates.yaml @@ -12,6 +12,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -28,6 +30,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -45,6 +49,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +67,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +85,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -92,6 +102,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +121,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -125,6 +139,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -142,6 +158,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -158,6 +176,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -174,6 +194,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scan/template_around_right/templates.yaml b/promptsource/templates/scan/template_around_right/templates.yaml index cd5d5f6b0..a107a738c 100644 --- a/promptsource/templates/scan/template_around_right/templates.yaml +++ b/promptsource/templates/scan/template_around_right/templates.yaml @@ -12,6 +12,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -28,6 +30,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -43,6 +47,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +64,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -75,6 +83,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +101,8 @@ templates: \n{{ 
actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -108,6 +120,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -125,6 +139,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -141,6 +157,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -157,6 +175,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -174,6 +194,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scan/template_jump_around_right/templates.yaml b/promptsource/templates/scan/template_jump_around_right/templates.yaml index 78b6dfbf4..ea20701ae 100644 --- a/promptsource/templates/scan/template_jump_around_right/templates.yaml +++ b/promptsource/templates/scan/template_jump_around_right/templates.yaml @@ -12,6 +12,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -28,6 +30,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +48,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -60,6 +66,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -76,6 +84,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +101,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +120,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -124,6 +138,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -140,6 +156,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -157,6 +175,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -173,6 +193,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/scan/template_opposite_right/templates.yaml b/promptsource/templates/scan/template_opposite_right/templates.yaml index c83df3317..59ab5aecb 100644 --- 
a/promptsource/templates/scan/template_opposite_right/templates.yaml +++ b/promptsource/templates/scan/template_opposite_right/templates.yaml @@ -11,6 +11,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -28,6 +30,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +48,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -60,6 +66,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -75,6 +83,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -92,6 +102,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +120,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -124,6 +138,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -140,6 +156,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -157,6 +175,8 @@ templates: \ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -174,6 +194,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scan/template_right/templates.yaml b/promptsource/templates/scan/template_right/templates.yaml index 530b9e952..e51fa1780 100644 --- a/promptsource/templates/scan/template_right/templates.yaml +++ b/promptsource/templates/scan/template_right/templates.yaml @@ -12,6 +12,8 @@ templates: \ actions should be comma-separated.\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -28,6 +30,8 @@ templates: \ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +48,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +67,8 @@ templates: walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +85,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -92,6 +102,8 @@ templates: , translate them into natural language.\n|||\n{{commands}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -109,6 +121,8 @@ templates: \ actions 
}}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -126,6 +140,8 @@ templates: \ actions.\n\n{{ commands }}\n|||\n{{ actions }} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -141,6 +157,8 @@ templates: \ commands }}\n\nSequence of actions: ||| {{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -157,6 +175,8 @@ templates: \n{{ actions }}\n|||\n{{ commands }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -174,6 +194,8 @@ templates: |||\n{{ actions }}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scicite/templates.yaml b/promptsource/templates/scicite/templates.yaml index 99d3b9853..a6491b6a0 100644 --- a/promptsource/templates/scicite/templates.yaml +++ b/promptsource/templates/scicite/templates.yaml @@ -7,8 +7,7 @@ templates: {{ string }} - {% if sectionName %} - It came from a section titled: {{sectionName}}. + {% if sectionName %} It came from a section titled: {{sectionName}}. {% endif %} @@ -28,6 +27,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -46,6 +47,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -74,6 +77,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -86,13 +91,16 @@ templates: "{{ string }}" - Is this citation describing a {{answer_choices[0]}}, a {{answer_choices[2]}}, or {{answer_choices[1]}}? + Is this citation describing a {{answer_choices[0]}}, a {{answer_choices[2]}}, + or {{answer_choices[1]}}? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -101,7 +109,8 @@ templates: f63606d8-7168-4201-a2bc-e48a442540ac: !Template answer_choices: method ||| background ||| result id: f63606d8-7168-4201-a2bc-e48a442540ac - jinja: 'Citations can describe a {{answer_choices[0]}}, a {{answer_choices[2]}}, or {{answer_choices[1]}}. + jinja: 'Citations can describe a {{answer_choices[0]}}, a {{answer_choices[2]}}, + or {{answer_choices[1]}}. What is the citation below describing? 
@@ -112,6 +121,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/scientific_papers/arxiv/templates.yaml b/promptsource/templates/scientific_papers/arxiv/templates.yaml index 604913564..57b56360c 100644 --- a/promptsource/templates/scientific_papers/arxiv/templates.yaml +++ b/promptsource/templates/scientific_papers/arxiv/templates.yaml @@ -11,6 +11,8 @@ templates: \\n\")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -29,6 +31,8 @@ templates: {{ abstract.strip().split(''\n'')[0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -47,6 +51,8 @@ templates: {{ article.strip().split(''\n'')[0] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -64,6 +70,8 @@ templates: {{ article.strip().split('' '')[:100] |join('' '')}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -81,6 +89,8 @@ templates: {{ section_names.strip().split(''\n'')|join('', '')}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -101,6 +111,8 @@ templates: {{article.strip().split(''\n'')[:3]|join("\n")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/scientific_papers/pubmed/templates.yaml b/promptsource/templates/scientific_papers/pubmed/templates.yaml index f5207fd86..04b40ada0 100644 --- a/promptsource/templates/scientific_papers/pubmed/templates.yaml +++ b/promptsource/templates/scientific_papers/pubmed/templates.yaml @@ -12,6 +12,8 @@ templates: {{ section_names.strip().split(''\n'')|join('', '')}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE @@ -32,6 +34,8 @@ templates: {{article.strip().split(''\n'')[:3]|join("\n")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -49,6 +53,8 @@ templates: {{ article.strip().split(''\n'')[0] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -66,6 +72,8 @@ templates: {{ article.strip().split('' '')[:100] |join('' '')}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -81,6 +89,8 @@ templates: \ of the article.\n|||\n{{article.strip().split('\\n')[3:5]|join(\"\\n\")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -98,6 +108,8 @@ templates: {{ abstract.strip().split(''\n'')[0]}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/sciq/templates.yaml b/promptsource/templates/sciq/templates.yaml index f81e7454e..7b3825017 100644 --- a/promptsource/templates/sciq/templates.yaml +++ b/promptsource/templates/sciq/templates.yaml @@ -12,6 +12,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +33,8 @@ templates: \ answer_choices[order[3]] }}\n\nA:|||{{answer_choices[3]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -51,6 +55,8 @@ templates: \n- {{ answer_choices[order[3]] }}\n\n\nA:|||{{answer_choices[3]}}\n" 
metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -71,6 +77,8 @@ templates: \nA:|||{{answer_choices[3]}}\n\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -83,6 +91,8 @@ templates: \nQ: {{question}}\n\n\nA:|||{{answer_choices[3]}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scitail/snli_format/templates.yaml b/promptsource/templates/scitail/snli_format/templates.yaml index 46649a81f..311ca6ced 100644 --- a/promptsource/templates/scitail/snli_format/templates.yaml +++ b/promptsource/templates/scitail/snli_format/templates.yaml @@ -19,6 +19,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scitail/tsv_format/templates.yaml b/promptsource/templates/scitail/tsv_format/templates.yaml index 1829c1711..3a5023b20 100644 --- a/promptsource/templates/scitail/tsv_format/templates.yaml +++ b/promptsource/templates/scitail/tsv_format/templates.yaml @@ -9,6 +9,8 @@ templates: {% else %}\n{{answer_choices[1]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: jinja: Suppose {{premise}} Can we infer that {{hypothesis}}? ||| {{label}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +37,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +52,8 @@ templates: {% else %}\n{{answer_choices[1]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -60,6 +68,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/scitldr/Abstract/templates.yaml b/promptsource/templates/scitldr/Abstract/templates.yaml index 66e734cd7..6647a0d70 100644 --- a/promptsource/templates/scitldr/Abstract/templates.yaml +++ b/promptsource/templates/scitldr/Abstract/templates.yaml @@ -7,6 +7,8 @@ templates: jinja: "Generate a summary for the text: \n{{source | join(\" \")}}\n|||\n{{target|choice}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -20,6 +22,8 @@ templates: \ sentence: {{source[0]}}\n|||\n{{source | join(\" \")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -34,6 +38,8 @@ templates: \ \n|||\n{{target[0]}}\n\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -47,6 +53,8 @@ templates: \ sentence: \n|||\n{{target|choice}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -63,6 +71,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -76,6 +86,8 @@ templates: {{target|choice}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/selqa/answer_selection_analysis/templates.yaml b/promptsource/templates/selqa/answer_selection_analysis/templates.yaml 
index 4bf9fec5b..233e177ee 100644 --- a/promptsource/templates/selqa/answer_selection_analysis/templates.yaml +++ b/promptsource/templates/selqa/answer_selection_analysis/templates.yaml @@ -10,6 +10,8 @@ templates: %}{{answer_choices[1]}}{% else %}{{answer_choices[0]}}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -19,12 +21,14 @@ templates: answer_choices: No ||| Yes id: 5354e98d-8aa2-49d0-a50b-fc72a503d7d4 jinja: '{% set possible_indexes = [] %}{% for c in candidates %}{% if c|trim %}{{ - possible_indexes.append(loop.index0) | default("", True) }}{% endif %}{% endfor %}{% set - rand_index = possible_indexes | choice %} Would it make sense to reply "{{ candidates[rand_index]|trim|trim(''.'') - }}" to the question "{{ question }}"? ||| {% if rand_index in answers %}{{answer_choices[1]}}{%else - %}{{answer_choices[0]}}{%endif%}' + possible_indexes.append(loop.index0) | default("", True) }}{% endif %}{% endfor + %}{% set rand_index = possible_indexes | choice %} Would it make sense to reply + "{{ candidates[rand_index]|trim|trim(''.'') }}" to the question "{{ question + }}"? ||| {% if rand_index in answers %}{{answer_choices[1]}}{%else %}{{answer_choices[0]}}{%endif%}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -40,6 +44,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -57,6 +63,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -76,6 +84,8 @@ templates: \ \"') }} ||| {{ response }}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/sem_eval_2010_task_8/templates.yaml b/promptsource/templates/sem_eval_2010_task_8/templates.yaml index 3978ae000..62da8b7dc 100644 --- a/promptsource/templates/sem_eval_2010_task_8/templates.yaml +++ b/promptsource/templates/sem_eval_2010_task_8/templates.yaml @@ -15,6 +15,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -36,6 +38,8 @@ templates: ||| {{ answer_choices[relation] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -60,6 +64,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -85,6 +91,8 @@ templates: e1 and e2 in the sentence: ||| {{ answer_choices[relation] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -106,6 +114,8 @@ templates: as {{answer_choices[:-1]|join(", ")}} or {{answer_choices[-1]}}? ||| {{answer_choices[relation]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/sem_eval_2014_task_1/templates.yaml b/promptsource/templates/sem_eval_2014_task_1/templates.yaml index 879a777d1..1e3fca70d 100644 --- a/promptsource/templates/sem_eval_2014_task_1/templates.yaml +++ b/promptsource/templates/sem_eval_2014_task_1/templates.yaml @@ -7,6 +7,8 @@ templates: ? 
||| {{answer_choices[entailment_judgment]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -21,6 +23,8 @@ templates: ||| {{(((10*relatedness_score)|round)/10)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson correlation - Spearman correlation @@ -36,6 +40,8 @@ templates: \ none of these options are valid, answer \"{{answer_choices[0]}}\".\n||| {{answer_choices[entailment_judgment]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -50,6 +56,8 @@ templates: \ |||\n {{answer_choices[entailment_judgment]}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -63,6 +71,8 @@ templates: what is the entailment label? ||| {{answer_choices[entailment_judgment]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +87,8 @@ templates: \ ||| {{(((10*relatedness_score)|round)/10)}}\n\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson correlation - Spearman correlation @@ -91,6 +103,8 @@ templates: unclear. ||| {{answer_choices[entailment_judgment]}} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/sent_comp/templates.yaml b/promptsource/templates/sent_comp/templates.yaml index ad4d4c15c..19bd98b24 100644 --- a/promptsource/templates/sent_comp/templates.yaml +++ b/promptsource/templates/sent_comp/templates.yaml @@ -12,6 +12,8 @@ templates: Given the above sentence, generate a compressed sentence: ||| {{compression.text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -29,6 +31,8 @@ templates: Given the above sentence, write a headline: ||| {{compression.text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -43,6 +47,8 @@ templates: Compressed sentence: ||| {{compression.text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -57,6 +63,8 @@ templates: Extreme TL;DR: ||| {{compression.text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -77,6 +85,8 @@ templates: {{headline}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -92,6 +102,8 @@ templates: Given the above sentence, write one compressed sentence to summarize: ||| {{compression.text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -103,6 +115,8 @@ templates: jinja: 'Compress: {{graph.sentence}} ||| {{compression.text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/sick/templates.yaml b/promptsource/templates/sick/templates.yaml index 989a76c84..427bd03bc 100644 --- a/promptsource/templates/sick/templates.yaml +++ b/promptsource/templates/sick/templates.yaml @@ -20,6 +20,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation - Spearman Correlation @@ -41,6 +43,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata 
choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -66,6 +70,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -84,6 +90,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -103,6 +111,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/sms_spam/templates.yaml b/promptsource/templates/sms_spam/templates.yaml index c33041403..3bf7eece7 100644 --- a/promptsource/templates/sms_spam/templates.yaml +++ b/promptsource/templates/sms_spam/templates.yaml @@ -7,6 +7,8 @@ templates: {{ answer_choices [label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -19,6 +21,8 @@ templates: |||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -35,6 +39,8 @@ templates: {{ answer_choices [label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -47,6 +53,8 @@ templates: ? {{sms}} \n|||\n{{ answer_choices [label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +66,8 @@ templates: jinja: "Is this sms message considered {{\"spam\"}}? \n{{sms}}\n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/snips_built_in_intents/templates.yaml b/promptsource/templates/snips_built_in_intents/templates.yaml index 9ce692154..9489f8e54 100644 --- a/promptsource/templates/snips_built_in_intents/templates.yaml +++ b/promptsource/templates/snips_built_in_intents/templates.yaml @@ -10,6 +10,8 @@ templates: \ | join(\", \")}}. 
\n\nWhich one would that be?\n\n|||\n\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -35,6 +37,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -50,6 +54,8 @@ templates: \ \n\n{{text}}\n\n|||\n\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -71,6 +77,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +94,8 @@ templates: \ | join(\", \")}}\n\n|||\n\n{{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -101,6 +111,8 @@ templates: \n|||\n\n\n{{answer_choices[label]}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +138,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/snli/templates.yaml b/promptsource/templates/snli/templates.yaml index f1fd816e2..49d398716 100644 --- a/promptsource/templates/snli/templates.yaml +++ b/promptsource/templates/snli/templates.yaml @@ -1,5 +1,4 @@ dataset: snli -subset: None templates: 11c67e6d-affb-4e8d-8a04-10186f8a789b: !Template answer_choices: Yes ||| Maybe ||| No @@ -8,6 +7,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +21,8 @@ templates: \ no, or maybe? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +36,8 @@ templates: \ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -45,6 +50,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +66,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -72,6 +81,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +97,8 @@ templates: {{"inconclusive"}}? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -99,6 +112,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -111,6 +126,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -123,6 +140,8 @@ templates: {{"sometimes"}}, or {{"never"}} true? 
||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -135,6 +154,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -147,6 +168,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -160,6 +183,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -173,6 +198,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -185,6 +212,8 @@ templates: no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/social_i_qa/templates.yaml b/promptsource/templates/social_i_qa/templates.yaml index f294ba7dc..ec8e5ca1c 100644 --- a/promptsource/templates/social_i_qa/templates.yaml +++ b/promptsource/templates/social_i_qa/templates.yaml @@ -15,6 +15,8 @@ templates: {{answer_choices[label | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -38,6 +40,8 @@ templates: {{answer_choices[label | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +56,8 @@ templates: \ | int) - 1 == random_answer_id %}\n Yes\n{% else %}\n No\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -73,6 +79,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -94,6 +102,8 @@ templates: {{answer_choices[label | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -126,6 +136,8 @@ templates: {{{"1": "A", "2": "B", "3": "C"}[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/species_800/templates.yaml b/promptsource/templates/species_800/templates.yaml index fc83e9fa6..60204e46b 100644 --- a/promptsource/templates/species_800/templates.yaml +++ b/promptsource/templates/species_800/templates.yaml @@ -17,6 +17,8 @@ templates: \ || \") }}" metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: [] original_task: true name: affirmative_bottom_list @@ -68,6 +70,8 @@ templates: )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: [] original_task: true name: affirmative_top_string @@ -119,6 +123,8 @@ templates: )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: [] original_task: true name: affirmative_bottom_string @@ -174,6 +180,8 @@ templates: )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: [] original_task: true name: interrogative_bottom_string @@ -215,6 +223,8 @@ templates: {{ new_list | 
join(" || ") }}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: [] original_task: true name: interrogative_bottom_list @@ -236,6 +246,8 @@ templates: \ \") }}" metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: [] original_task: true name: affirmative_top_list @@ -292,6 +304,8 @@ templates: )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: [] original_task: true name: interrogative_top_string @@ -335,6 +349,8 @@ templates: {{ new_list | join(" || ") }}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: [] original_task: true name: interrogative_top_list diff --git a/promptsource/templates/squad/templates.yaml b/promptsource/templates/squad/templates.yaml index 8c72ee157..53f03ea35 100644 --- a/promptsource/templates/squad/templates.yaml +++ b/promptsource/templates/squad/templates.yaml @@ -16,6 +16,8 @@ templates: {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -33,6 +35,8 @@ templates: A: ||| {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -47,6 +51,8 @@ templates: Generate a question from the above passage : ||| {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -63,6 +69,8 @@ templates: as the answer would be: ||| {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -76,6 +84,8 @@ templates: \n{{answers.text[0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -94,6 +104,8 @@ templates: ||| {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/squad_adversarial/AddSent/templates.yaml b/promptsource/templates/squad_adversarial/AddSent/templates.yaml index 15d096288..af2a74e32 100644 --- a/promptsource/templates/squad_adversarial/AddSent/templates.yaml +++ b/promptsource/templates/squad_adversarial/AddSent/templates.yaml @@ -14,6 +14,8 @@ templates: ||| {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -35,6 +37,8 @@ templates: {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -50,6 +54,8 @@ templates: as the answer would be: ||| {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -65,6 +71,8 @@ templates: Generate a question from the above passage : ||| {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -78,6 +86,8 @@ templates: \n{{answers.text[0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -95,6 +105,8 @@ templates: A: ||| {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/squad_v2/templates.yaml b/promptsource/templates/squad_v2/templates.yaml index dbc1d8b8c..814be0b6f 100644 --- a/promptsource/templates/squad_v2/templates.yaml +++ 
b/promptsource/templates/squad_v2/templates.yaml @@ -31,6 +31,8 @@ templates: {% endif %}' metadata: &id001 !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -57,6 +59,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -91,6 +95,8 @@ templates: {{title | replace("_", " ")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -131,6 +137,8 @@ templates: {{title | replace("_", " ")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -157,6 +165,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -203,6 +213,8 @@ templates: {% endif %}' metadata: &id002 !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -255,6 +267,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -312,6 +326,8 @@ templates: \ %}\n{{answer_choices[1]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -341,6 +357,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -358,6 +376,8 @@ templates: {{title | replace("_", " ")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/squadshifts/amazon/templates.yaml b/promptsource/templates/squadshifts/amazon/templates.yaml index 628f59c76..4010ce0d2 100644 --- a/promptsource/templates/squadshifts/amazon/templates.yaml +++ b/promptsource/templates/squadshifts/amazon/templates.yaml @@ -18,6 +18,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -38,6 +40,8 @@ templates: {{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -59,6 +63,8 @@ templates: {{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -80,6 +86,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -100,6 +108,8 @@ templates: {{answers["text"] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -123,6 +133,8 @@ templates: {{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -143,6 +155,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -156,6 +170,8 @@ templates: \ question: \n{{question}} |||\n{{answers[\"text\"]|choice}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/squadshifts/new_wiki/templates.yaml b/promptsource/templates/squadshifts/new_wiki/templates.yaml index d828d39dd..5a5be1170 100644 --- a/promptsource/templates/squadshifts/new_wiki/templates.yaml +++ b/promptsource/templates/squadshifts/new_wiki/templates.yaml @@ -15,6 +15,8 @@ templates: 
{{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -35,6 +37,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -58,6 +62,8 @@ templates: {{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -77,6 +83,8 @@ templates: {{answers["text"] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -100,6 +108,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -119,6 +129,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -132,6 +144,8 @@ templates: \ question: \n{{question}} |||\n{{answers[\"text\"]|choice}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -153,6 +167,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -175,6 +191,8 @@ templates: {{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/squadshifts/nyt/templates.yaml b/promptsource/templates/squadshifts/nyt/templates.yaml index 89ecab438..c2118b25c 100644 --- a/promptsource/templates/squadshifts/nyt/templates.yaml +++ b/promptsource/templates/squadshifts/nyt/templates.yaml @@ -18,6 +18,8 @@ templates: {{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -38,6 +40,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -48,8 +52,8 @@ templates: answer_choices: null id: 6336eed0-3ecd-4007-8ad3-f6e615570fdf jinja: 'I''m working on the final exam for my class and am trying to figure out - the answer to the question "{{question}}" I found the following info on New York Times - and I think it has the answer. Can you tell me the answer? + the answer to the question "{{question}}" I found the following info on New + York Times and I think it has the answer. Can you tell me the answer? 
{{context}} @@ -60,6 +64,8 @@ templates: {{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -72,6 +78,8 @@ templates: \ question: \n{{question}} |||\n{{answers[\"text\"]|choice}}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -94,6 +102,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -114,6 +124,8 @@ templates: {{answers["text"] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -136,6 +148,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -156,6 +170,8 @@ templates: {{answers[''text''] | most_frequent | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/sst/default/templates.yaml b/promptsource/templates/sst/default/templates.yaml index 75564b551..b7539bb6b 100644 --- a/promptsource/templates/sst/default/templates.yaml +++ b/promptsource/templates/sst/default/templates.yaml @@ -16,6 +16,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +36,8 @@ templates: {{''%0.1f''| format(label|float)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -52,6 +56,8 @@ templates: {{''%0.1f''| format(label|float)}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -68,6 +74,8 @@ templates: {{answer_choices[0 if label < 0.5 else 1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -88,6 +96,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/story_cloze/2016/templates.yaml b/promptsource/templates/story_cloze/2016/templates.yaml index b01484750..c121f8340 100644 --- a/promptsource/templates/story_cloze/2016/templates.yaml +++ b/promptsource/templates/story_cloze/2016/templates.yaml @@ -10,6 +10,8 @@ templates: -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -24,6 +26,8 @@ templates: n- \")}}\n|||\n\n{{answer_choices[answer_right_ending -1]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -38,6 +42,8 @@ templates: -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +58,8 @@ templates: \ -1]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -65,6 +73,8 @@ templates: -1]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -80,6 +90,8 @@ templates: ")}} ||| {{answer_choices[answer_right_ending -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/stsb_multi_mt/en/templates.yaml b/promptsource/templates/stsb_multi_mt/en/templates.yaml index 
0aadbce65..6e731e94c 100644 --- a/promptsource/templates/stsb_multi_mt/en/templates.yaml +++ b/promptsource/templates/stsb_multi_mt/en/templates.yaml @@ -8,6 +8,8 @@ templates: {{"0.0"}} and {{"5.0"}}. ||| {{(((5*similarity_score)|round)/5)}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -20,6 +22,8 @@ templates: ||| {{answer_choices[0 if similarity_score < 2.5 else 1]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -32,6 +36,8 @@ templates: if similarity_score < 2.5 else 1]}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -44,6 +50,8 @@ templates: and "{{sentence2}}"? ||| {{(((5*similarity_score)|round)/5)}} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true @@ -57,6 +65,8 @@ templates: {{(((5*similarity_score)|round)/5)}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Pearson Correlation original_task: true diff --git a/promptsource/templates/subjqa/books/templates.yaml b/promptsource/templates/subjqa/books/templates.yaml index 96fcadb4c..4fa67ca3e 100644 --- a/promptsource/templates/subjqa/books/templates.yaml +++ b/promptsource/templates/subjqa/books/templates.yaml @@ -25,6 +25,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -61,6 +63,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -76,6 +80,8 @@ templates: text\"][0]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -91,6 +97,8 @@ templates: text\"] | join(\" \\n \")}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -124,6 +132,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -146,6 +156,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -174,6 +186,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -205,6 +219,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -232,6 +248,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/subjqa/electronics/templates.yaml b/promptsource/templates/subjqa/electronics/templates.yaml index 1ec1f3867..049c07397 100644 --- a/promptsource/templates/subjqa/electronics/templates.yaml +++ b/promptsource/templates/subjqa/electronics/templates.yaml @@ -1,4 +1,5 @@ -dataset: subjqa/electronics +dataset: subjqa +subset: electronics templates: 2077a669-1574-4117-84fe-e683bead8d46: !Template answer_choices: books|||electronics|||grocery|||movies|||restaurants|||tripadvisor @@ -26,6 +27,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: 
!TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -62,6 +65,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -77,6 +82,8 @@ templates: text\"][0]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -106,6 +113,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -139,6 +148,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -167,6 +178,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -189,6 +202,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -204,6 +219,8 @@ templates: text\"] | join(\" \\n \")}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -231,6 +248,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/subjqa/grocery/templates.yaml b/promptsource/templates/subjqa/grocery/templates.yaml index c19dd2fb5..ea3f6eaaf 100644 --- a/promptsource/templates/subjqa/grocery/templates.yaml +++ b/promptsource/templates/subjqa/grocery/templates.yaml @@ -1,4 +1,5 @@ -dataset: subjqa/grocery +dataset: subjqa +subset: grocery templates: 0f728f5b-6488-439d-8a92-6e15a1d87c62: !Template answer_choices: null @@ -10,6 +11,8 @@ templates: text\"][0]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -41,6 +44,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -77,6 +82,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -99,6 +106,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -127,6 +136,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -160,6 +171,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -189,6 +202,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -204,6 +219,8 @@ templates: text\"] | join(\" \\n \")}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -231,6 +248,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/subjqa/movies/templates.yaml 
b/promptsource/templates/subjqa/movies/templates.yaml index deb37aeba..25a1e118f 100644 --- a/promptsource/templates/subjqa/movies/templates.yaml +++ b/promptsource/templates/subjqa/movies/templates.yaml @@ -1,4 +1,5 @@ -dataset: subjqa/movies +dataset: subjqa +subset: movies templates: 1b63e0fb-e9c3-4e6c-b5f1-3a922fcef327: !Template answer_choices: null @@ -10,6 +11,8 @@ templates: text\"] | join(\" \\n \")}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -32,6 +35,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -47,6 +52,8 @@ templates: text\"][0]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -76,6 +83,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -103,6 +112,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -139,6 +150,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -167,6 +180,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -198,6 +213,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -231,6 +248,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/subjqa/restaurants/templates.yaml b/promptsource/templates/subjqa/restaurants/templates.yaml index 49095f5bc..fd39fb8db 100644 --- a/promptsource/templates/subjqa/restaurants/templates.yaml +++ b/promptsource/templates/subjqa/restaurants/templates.yaml @@ -1,4 +1,5 @@ -dataset: subjqa/restaurants +dataset: subjqa +subset: restaurants templates: 5177d00a-255d-4a80-bb77-2d94f40e276c: !Template answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5 @@ -23,6 +24,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -59,6 +62,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -74,6 +79,8 @@ templates: text\"][0]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -101,6 +108,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -130,6 +139,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -163,6 +174,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -194,6 +207,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ 
-209,6 +224,8 @@ templates: text\"] | join(\" \\n \")}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -231,6 +248,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/subjqa/tripadvisor/templates.yaml b/promptsource/templates/subjqa/tripadvisor/templates.yaml index da46fc6dc..3c34d16b3 100644 --- a/promptsource/templates/subjqa/tripadvisor/templates.yaml +++ b/promptsource/templates/subjqa/tripadvisor/templates.yaml @@ -1,4 +1,5 @@ -dataset: subjqa/tripadvisor +dataset: subjqa +subset: tripadvisor templates: 0cb4bf0f-6f89-4f17-bf81-9740fac3d374: !Template answer_choices: books|||electronics|||grocery|||movies|||restaurants|||tripadvisor @@ -26,6 +27,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -41,6 +44,8 @@ templates: text\"] | join(\" \\n \")}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -63,6 +68,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -96,6 +103,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -124,6 +133,8 @@ templates: {{answer_choices[question_subj_level -1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -139,6 +150,8 @@ templates: text\"][0]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -166,6 +179,8 @@ templates: {{answer_choices[mapping[domain]]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -195,6 +210,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -231,6 +248,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/super_glue/axb/templates.yaml b/promptsource/templates/super_glue/axb/templates.yaml index 444b59934..bb25f4569 100644 --- a/promptsource/templates/super_glue/axb/templates.yaml +++ b/promptsource/templates/super_glue/axb/templates.yaml @@ -8,6 +8,8 @@ templates: or no? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: no? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -45,6 +51,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -57,6 +65,8 @@ templates: no? 
||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -69,6 +79,8 @@ templates: {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -81,6 +93,8 @@ templates: \ or no? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -93,6 +107,8 @@ templates: Yes or no? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +121,8 @@ templates: Yes or no? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -118,6 +136,8 @@ templates: Question: {{sentence2}} True or False? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/axg/templates.yaml b/promptsource/templates/super_glue/axg/templates.yaml index 684eb40fb..3d247434f 100644 --- a/promptsource/templates/super_glue/axg/templates.yaml +++ b/promptsource/templates/super_glue/axg/templates.yaml @@ -8,6 +8,8 @@ templates: or no? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -20,6 +22,8 @@ templates: \ or no? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +36,8 @@ templates: Yes or no? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -44,6 +50,8 @@ templates: Yes or no? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -57,6 +65,8 @@ templates: Question: {{hypothesis}} True or False? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -69,6 +79,8 @@ templates: no? 
||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -81,6 +93,8 @@ templates: {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -93,6 +107,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +121,8 @@ templates: {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -118,6 +136,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/boolq/templates.yaml b/promptsource/templates/super_glue/boolq/templates.yaml index 3a7ae0745..0fa8b0ac9 100644 --- a/promptsource/templates/super_glue/boolq/templates.yaml +++ b/promptsource/templates/super_glue/boolq/templates.yaml @@ -17,6 +17,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -29,6 +31,8 @@ templates: \ -1 %}\n{{ answer_choices[label] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -41,6 +45,8 @@ templates: \ label != -1 %}\n{{ answer_choices[label] }} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +67,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -73,6 +81,8 @@ templates: \ ||| {% if label != -1 %}{{ answer_choices[label] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -85,6 +95,8 @@ templates: \ ||| \n{% if label != -1 %}\n{{answer_choices[label]}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +117,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -127,6 +141,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -143,6 +159,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -163,6 +181,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/cb/templates.yaml b/promptsource/templates/super_glue/cb/templates.yaml index 835b2529f..895aee368 100644 --- a/promptsource/templates/super_glue/cb/templates.yaml +++ b/promptsource/templates/super_glue/cb/templates.yaml @@ -8,6 +8,8 @@ templates: ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -21,6 +23,8 @@ templates: %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +38,8 @@ templates: }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: 
true + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +52,8 @@ templates: ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +66,8 @@ templates: or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -71,6 +81,8 @@ templates: }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -85,6 +97,8 @@ templates: answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -99,6 +113,8 @@ templates: \ if label !=-1 %}{{ answer_choices[label] }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -111,6 +127,8 @@ templates: or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -124,6 +142,8 @@ templates: %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -137,6 +157,8 @@ templates: \ !=-1 %}{{ answer_choices[label] }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -149,6 +171,8 @@ templates: \ no, or maybe? ||| {% if label !=-1 %}{{answer_choices[label]}}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -162,6 +186,8 @@ templates: {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -174,6 +200,8 @@ templates: no, or maybe? 
||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -189,6 +217,8 @@ templates: %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/copa/templates.yaml b/promptsource/templates/super_glue/copa/templates.yaml index 23083492b..9e9c0a30e 100644 --- a/promptsource/templates/super_glue/copa/templates.yaml +++ b/promptsource/templates/super_glue/copa/templates.yaml @@ -15,6 +15,8 @@ templates: - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -28,6 +30,8 @@ templates: \ != -1 %}{{ answer_choices[label] }}{%endif%}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -42,6 +46,8 @@ templates: \ }}{%endif%}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -60,6 +66,8 @@ templates: - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -73,6 +81,8 @@ templates: != -1 %}{{ answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +96,8 @@ templates: \ }}{%endif%}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -99,6 +111,8 @@ templates: \ {% endif %}\n||| {% if label != -1 %}{{answer_choices[label]}}{%endif%}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -112,6 +126,8 @@ templates: \ != -1 %}{{ answer_choices[label] }}{%endif%}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -130,6 +146,8 @@ templates: - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -149,6 +167,8 @@ templates: - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -162,6 +182,8 @@ templates: \ }}{%endif%}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -180,6 +202,8 @@ templates: - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/multirc/templates.yaml b/promptsource/templates/super_glue/multirc/templates.yaml index 61bd6a954..2107173d0 100644 --- a/promptsource/templates/super_glue/multirc/templates.yaml +++ b/promptsource/templates/super_glue/multirc/templates.yaml @@ -16,6 +16,8 @@ templates: {% if label != -1 %}{{answer_choices[label]}}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -29,6 +31,8 @@ templates: \ }}{% endif %}" metadata: 
!TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -49,6 +53,8 @@ templates: {% if label != -1 %}{{answer_choices[label]}}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +67,8 @@ templates: \ }}\"? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -73,6 +81,8 @@ templates: \ if label != -1 %}{{ answer_choices[label] }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -93,6 +103,8 @@ templates: {% if label != -1 %}{{answer_choices[label]}}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -105,6 +117,8 @@ templates: \ }}? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -125,6 +139,8 @@ templates: {% if label != -1 %}{{answer_choices[label]}}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -145,6 +161,8 @@ templates: {% if label != -1 %}{{answer_choices[label]}}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -158,6 +176,8 @@ templates: \ endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/record/templates.yaml b/promptsource/templates/super_glue/record/templates.yaml index 8d2511f8a..6949a3004 100644 --- a/promptsource/templates/super_glue/record/templates.yaml +++ b/promptsource/templates/super_glue/record/templates.yaml @@ -9,6 +9,8 @@ templates: \ | choice }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Squad original_task: true @@ -22,6 +24,8 @@ templates: \ answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -38,6 +42,8 @@ templates: \ {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -55,6 +61,8 @@ templates: \ }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -68,6 +76,8 @@ templates: ||| {% if ( answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Squad original_task: true @@ -83,6 +93,8 @@ templates: \ 0 %}- {{ query | replace(\"@placeholder\", answers | choice) }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -99,6 +111,8 @@ templates: \ {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -112,6 +126,8 @@ templates: \ endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -133,6 +149,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ 
-149,6 +167,8 @@ templates: \ choice) }} {% endif %}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -162,6 +182,8 @@ templates: \ answers | choice }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Squad original_task: true @@ -188,6 +210,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Squad original_task: true @@ -200,6 +224,8 @@ templates: \ ( answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -216,6 +242,8 @@ templates: , answers | choice) }} {% endif %}\n" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -239,6 +267,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -263,6 +293,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Squad original_task: true @@ -278,6 +310,8 @@ templates: \ }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -294,6 +328,8 @@ templates: \ choice) }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -307,6 +343,8 @@ templates: \ | choice }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -322,6 +360,8 @@ templates: \ {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/rte/templates.yaml b/promptsource/templates/super_glue/rte/templates.yaml index 2227004a5..11dd1c2c5 100644 --- a/promptsource/templates/super_glue/rte/templates.yaml +++ b/promptsource/templates/super_glue/rte/templates.yaml @@ -9,6 +9,8 @@ templates: -1 %}{{ answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -21,6 +23,8 @@ templates: ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +37,8 @@ templates: {% if label != -1 %}{{ answer_choices[label] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -47,6 +53,8 @@ templates: }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +67,8 @@ templates: \ or no? ||| {% if label != -1 %}{{answer_choices[label]}}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -71,6 +81,8 @@ templates: no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -83,6 +95,8 @@ templates: {% if label != -1 %}{{ answer_choices[label] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -95,6 +109,8 @@ templates: Yes or no? 
||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -107,6 +123,8 @@ templates: ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -119,6 +137,8 @@ templates: Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/wic/templates.yaml b/promptsource/templates/super_glue/wic/templates.yaml index 40e4c205f..1ca77970d 100644 --- a/promptsource/templates/super_glue/wic/templates.yaml +++ b/promptsource/templates/super_glue/wic/templates.yaml @@ -18,6 +18,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -39,6 +41,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -64,6 +68,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +92,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -108,6 +116,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -131,6 +141,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -144,6 +156,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -166,6 +180,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -190,6 +206,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -211,6 +229,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/super_glue/wsc.fixed/templates.yaml b/promptsource/templates/super_glue/wsc.fixed/templates.yaml index 30fb7ee34..7c94ac112 100644 --- a/promptsource/templates/super_glue/wsc.fixed/templates.yaml +++ b/promptsource/templates/super_glue/wsc.fixed/templates.yaml @@ -9,6 +9,8 @@ templates: }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -21,6 +23,8 @@ templates: or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +38,8 @@ templates: \ -1 %}{{ answer_choices[label] }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -47,6 +53,8 @@ templates: answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +67,8 @@ templates: }}? Yes or no? 
||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -72,6 +82,8 @@ templates: \ != -1 %}{{ answer_choices[label] }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -85,6 +97,8 @@ templates: }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -100,6 +114,8 @@ templates: \ }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -113,6 +129,8 @@ templates: \ }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -128,6 +146,8 @@ templates: \ %}{{ answer_choices[label] }}{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/swag/regular/templates.yaml b/promptsource/templates/swag/regular/templates.yaml index 3d757c369..1a737235a 100644 --- a/promptsource/templates/swag/regular/templates.yaml +++ b/promptsource/templates/swag/regular/templates.yaml @@ -11,6 +11,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -27,6 +29,8 @@ templates: \ endif %}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -40,6 +44,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -54,6 +60,8 @@ templates: \ }}\n(d) {{ ending3 }}\n||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +75,8 @@ templates: \ ending2 }}\n(d) {{ ending3 }}\n||| \n{{answer_choices[label]}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +89,8 @@ templates: \ starting sentence with the ending: {{endings[label]}}\n||| \n{{sent1}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -97,6 +109,8 @@ templates: {{ endings[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/tab_fact/tab_fact/templates.yaml b/promptsource/templates/tab_fact/tab_fact/templates.yaml index 759194643..6c375db38 100644 --- a/promptsource/templates/tab_fact/tab_fact/templates.yaml +++ b/promptsource/templates/tab_fact/tab_fact/templates.yaml @@ -23,6 +23,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Edit Distance original_task: false @@ -38,6 +40,8 @@ templates: \ one is it? 
||| \n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -53,6 +57,8 @@ templates: n\"}} is the delimiter between rows.\n|||\n{{statement}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Edit Distance original_task: false @@ -66,6 +72,8 @@ templates: \ {{\"\\n\"}} is the delimiter between rows.\n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -92,6 +100,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tmu_gfm_dataset/templates.yaml b/promptsource/templates/tmu_gfm_dataset/templates.yaml index fa703664c..9fa60ac3b 100644 --- a/promptsource/templates/tmu_gfm_dataset/templates.yaml +++ b/promptsource/templates/tmu_gfm_dataset/templates.yaml @@ -17,6 +17,8 @@ templates: {{ (((10*ave_f) | round )/10) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -39,6 +41,8 @@ templates: {{ (((10*ave_g) | round )/10) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -58,6 +62,8 @@ templates: {{output}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -81,6 +87,8 @@ templates: {{ (((10*ave_m) | round )/10) }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -109,6 +117,8 @@ templates: | round )/10) }}.' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -145,6 +155,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/trec/templates.yaml b/promptsource/templates/trec/templates.yaml index a4aca1da6..d3ebfe499 100644 --- a/promptsource/templates/trec/templates.yaml +++ b/promptsource/templates/trec/templates.yaml @@ -8,6 +8,8 @@ templates: \ {{text}} \nAnswer: ||| {{ answer_choices [label_coarse] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +33,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -58,6 +62,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -86,6 +92,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -112,6 +120,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -134,6 +144,8 @@ templates: {{answer_choices[label_coarse]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -159,6 +171,8 @@ templates: {{answer_choices[label_fine] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -183,6 +197,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy 
original_task: false @@ -197,6 +213,8 @@ templates: \ [label_coarse] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -220,6 +238,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -239,6 +259,8 @@ templates: {{ answer_choices [label_coarse] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -252,6 +274,8 @@ templates: \ }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -265,6 +289,8 @@ templates: \ [label_mapping[label_fine]] }}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -284,6 +310,8 @@ templates: {{ answer_choices [label_coarse] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -307,6 +335,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -332,6 +362,8 @@ templates: {{ answer_choices[label_fine] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -354,6 +386,8 @@ templates: {{ answer_choices [label_mapping[label_fine]] }}{% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -378,6 +412,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/trivia_qa/unfiltered/templates.yaml b/promptsource/templates/trivia_qa/unfiltered/templates.yaml index c9e309066..c25041f47 100644 --- a/promptsource/templates/trivia_qa/unfiltered/templates.yaml +++ b/promptsource/templates/trivia_qa/unfiltered/templates.yaml @@ -8,6 +8,8 @@ templates: {{answer.aliases|choice}}\" \n ||| \n {{question}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -22,6 +24,8 @@ templates: \ \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -35,6 +39,8 @@ templates: \ %} \n{{answer.aliases|choice}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -48,6 +54,8 @@ templates: {{answer.aliases|choice}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other @@ -61,6 +69,8 @@ templates: {% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy - Other diff --git a/promptsource/templates/turk/templates.yaml b/promptsource/templates/turk/templates.yaml index 9d1c8e7ca..c61b67552 100644 --- a/promptsource/templates/turk/templates.yaml +++ b/promptsource/templates/turk/templates.yaml @@ -21,6 +21,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -74,6 +76,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -100,6 +104,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ 
-121,6 +127,8 @@ templates: \ \"Text B\".\n|||\nText A\n{% endif %} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/emoji/templates.yaml b/promptsource/templates/tweet_eval/emoji/templates.yaml index 4be409c9b..fb85973d3 100755 --- a/promptsource/templates/tweet_eval/emoji/templates.yaml +++ b/promptsource/templates/tweet_eval/emoji/templates.yaml @@ -16,6 +16,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: true @@ -36,6 +38,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Other original_task: false @@ -55,6 +59,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/tweet_eval/emotion/templates.yaml b/promptsource/templates/tweet_eval/emotion/templates.yaml index 5dcc2387c..86ba7aa0d 100644 --- a/promptsource/templates/tweet_eval/emotion/templates.yaml +++ b/promptsource/templates/tweet_eval/emotion/templates.yaml @@ -15,6 +15,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -32,6 +34,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +56,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -65,6 +71,8 @@ templates: (d) {{answer_choices[3]}}\n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -86,6 +94,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/hate/templates.yaml b/promptsource/templates/tweet_eval/hate/templates.yaml index 839693ed0..e5654fdb5 100644 --- a/promptsource/templates/tweet_eval/hate/templates.yaml +++ b/promptsource/templates/tweet_eval/hate/templates.yaml @@ -12,6 +12,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -28,6 +30,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -39,6 +43,8 @@ templates: jinja: "Is this a hateful tweet? \n{{text}}\n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -55,6 +61,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +75,8 @@ templates: \ a hateful tweet? 
\n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/irony/templates.yaml b/promptsource/templates/tweet_eval/irony/templates.yaml index bed666f12..a3b851ae3 100644 --- a/promptsource/templates/tweet_eval/irony/templates.yaml +++ b/promptsource/templates/tweet_eval/irony/templates.yaml @@ -7,6 +7,8 @@ templates: jinja: "Is this tweet is ironic? \n\n{{text}} |||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -19,6 +21,8 @@ templates: {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +35,8 @@ templates: \ an ironic tweet? \n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -47,6 +53,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +75,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/offensive/templates.yaml b/promptsource/templates/tweet_eval/offensive/templates.yaml index b89e064b4..276a9c7b4 100644 --- a/promptsource/templates/tweet_eval/offensive/templates.yaml +++ b/promptsource/templates/tweet_eval/offensive/templates.yaml @@ -7,6 +7,8 @@ templates: jinja: "Is this tweet {{\"offensive\"}}? \n\n{{text}} |||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -19,6 +21,8 @@ templates: \ \n\n{{text}}\n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -37,6 +41,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +64,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -70,6 +78,8 @@ templates: \ an offensive tweet?\n\n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/sentiment/templates.yaml b/promptsource/templates/tweet_eval/sentiment/templates.yaml index 0e391b2d3..7ace74214 100644 --- a/promptsource/templates/tweet_eval/sentiment/templates.yaml +++ b/promptsource/templates/tweet_eval/sentiment/templates.yaml @@ -17,6 +17,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -29,6 +31,8 @@ templates: \ {{answer_choices | join(\", \")}}\n|||\n{{answer_choices[label]}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -41,6 +45,8 @@ templates: \ {{answer_choices | join(\", \")}} \n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -54,6 +60,8 @@ templates: , 
\")}}\n|||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +75,8 @@ templates: |||\n{{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/stance_abortion/templates.yaml b/promptsource/templates/tweet_eval/stance_abortion/templates.yaml index 83125102e..833ecde9b 100644 --- a/promptsource/templates/tweet_eval/stance_abortion/templates.yaml +++ b/promptsource/templates/tweet_eval/stance_abortion/templates.yaml @@ -12,6 +12,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -26,6 +28,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -43,6 +47,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -59,6 +65,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,7 +75,8 @@ templates: c5507588-1d20-42f9-935f-0c767294f5a9: !Template answer_choices: Neutral ||| Against ||| In favor id: c5507588-1d20-42f9-935f-0c767294f5a9 - jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", ")}} + jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", + ")}} {{text}} ||| @@ -75,6 +84,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -88,6 +99,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/stance_atheism/templates.yaml b/promptsource/templates/tweet_eval/stance_atheism/templates.yaml index 42f0d08bb..0365325e1 100644 --- a/promptsource/templates/tweet_eval/stance_atheism/templates.yaml +++ b/promptsource/templates/tweet_eval/stance_atheism/templates.yaml @@ -9,6 +9,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -17,7 +19,8 @@ templates: 4309e10d-c9a9-4a17-8561-15270b998905: !Template answer_choices: Neutral ||| Against ||| In favor id: 4309e10d-c9a9-4a17-8561-15270b998905 - jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", ")}} + jinja: 'How would you describe the stance used in this tweet? 
{{answer_choices|join(", + ")}} {{text}} ||| @@ -25,6 +28,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -42,6 +47,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +65,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -74,6 +83,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -88,6 +99,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/stance_climate/templates.yaml b/promptsource/templates/tweet_eval/stance_climate/templates.yaml index 3d602f954..f114b4b5f 100644 --- a/promptsource/templates/tweet_eval/stance_climate/templates.yaml +++ b/promptsource/templates/tweet_eval/stance_climate/templates.yaml @@ -13,6 +13,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -26,6 +28,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -34,7 +38,8 @@ templates: 703f067e-5930-424e-9882-48063307ff8e: !Template answer_choices: Neutral ||| Against ||| In favor id: 703f067e-5930-424e-9882-48063307ff8e - jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", ")}} + jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", + ")}} {{text}} ||| @@ -42,6 +47,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -50,12 +57,14 @@ templates: 8ecd5059-742a-4833-95a1-bf0e25e9abfc: !Template answer_choices: Neutral ||| Against ||| In favor id: 8ecd5059-742a-4833-95a1-bf0e25e9abfc - jinja: '{{text}} Where does the author of the above sentence stand on climate change? - ||| + jinja: '{{text}} Where does the author of the above sentence stand on climate + change? ||| {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -64,7 +73,8 @@ templates: cd82620b-6d1d-42f7-af89-56980cbb69a5: !Template answer_choices: Neutral ||| Against ||| In favor id: cd82620b-6d1d-42f7-af89-56980cbb69a5 - jinja: 'Does the author express any stance about climate change in the following text? + jinja: 'Does the author express any stance about climate change in the following + text? 
{{text}} ||| @@ -72,6 +82,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -88,6 +100,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/stance_feminist/templates.yaml b/promptsource/templates/tweet_eval/stance_feminist/templates.yaml index 7234d9b47..4ec2b5a7f 100644 --- a/promptsource/templates/tweet_eval/stance_feminist/templates.yaml +++ b/promptsource/templates/tweet_eval/stance_feminist/templates.yaml @@ -9,6 +9,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -17,7 +19,8 @@ templates: 4309e10d-c9a9-4a17-8561-15270b99890b: !Template answer_choices: Neutral ||| Against ||| In favor id: 4309e10d-c9a9-4a17-8561-15270b99890b - jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", ")}} + jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", + ")}} {{text}} ||| @@ -25,6 +28,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -42,6 +47,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +65,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -74,6 +83,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -88,6 +99,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tweet_eval/stance_hillary/templates.yaml b/promptsource/templates/tweet_eval/stance_hillary/templates.yaml index 5dc7cbb61..68cd2bc4a 100644 --- a/promptsource/templates/tweet_eval/stance_hillary/templates.yaml +++ b/promptsource/templates/tweet_eval/stance_hillary/templates.yaml @@ -12,6 +12,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -29,6 +31,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -45,6 +49,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -58,6 +64,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -72,6 +80,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -80,7 +90,8 @@ templates: b521857a-9d4f-4e21-848b-0baf7f4a636c: !Template answer_choices: Neutral ||| Against ||| In favor id: b521857a-9d4f-4e21-848b-0baf7f4a636c - jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(", ")}} + jinja: 'How would you describe the stance used in this tweet? 
{{answer_choices|join(", + ")}} {{text}} ||| @@ -88,6 +99,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tydiqa/primary_task/templates.yaml b/promptsource/templates/tydiqa/primary_task/templates.yaml index e4cb527ad..445d45e73 100644 --- a/promptsource/templates/tydiqa/primary_task/templates.yaml +++ b/promptsource/templates/tydiqa/primary_task/templates.yaml @@ -24,6 +24,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -38,6 +40,8 @@ templates: \ | capitalize}}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +56,8 @@ templates: \ | capitalize}} \n {% endif %} \n{% endif %} " metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -81,6 +87,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -107,6 +115,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -122,6 +132,8 @@ templates: \ {% endif %}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -136,6 +148,8 @@ templates: {{annotations.yes_no_answer[0] | capitalize}}\n {% endif %}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -150,6 +164,8 @@ templates: \ | capitalize}}\n {% endif %}\n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/tydiqa/secondary_task/templates.yaml b/promptsource/templates/tydiqa/secondary_task/templates.yaml index 215499e23..e5023fc6e 100644 --- a/promptsource/templates/tydiqa/secondary_task/templates.yaml +++ b/promptsource/templates/tydiqa/secondary_task/templates.yaml @@ -10,6 +10,8 @@ templates: \ | choice}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -51,6 +53,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - BLEU - ROUGE @@ -74,6 +78,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -101,6 +107,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -114,6 +122,8 @@ templates: \ {{context}}\n|||\n{{answers.text | choice}} \n{% endif %}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -138,6 +148,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -160,6 +172,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -184,6 +198,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: false @@ -211,6 +227,8 @@ templates: {% endif %}' metadata: !TemplateMetadata 
choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/web_questions/templates.yaml b/promptsource/templates/web_questions/templates.yaml index bc819077a..d98f106e3 100644 --- a/promptsource/templates/web_questions/templates.yaml +++ b/promptsource/templates/web_questions/templates.yaml @@ -7,6 +7,8 @@ templates: choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -19,6 +21,8 @@ templates: {{ answers | choice }} metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -30,6 +34,8 @@ templates: jinja: 'What''s the answer to that question: {{question}} ||| {{answers | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -41,6 +47,8 @@ templates: jinja: 'Short general knowledge question: {{question}} ||| {{answers | choice}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -52,6 +60,8 @@ templates: jinja: '{{ question|capitalize }} ||| {{ answers | choice }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/wiki_bio/templates.yaml b/promptsource/templates/wiki_bio/templates.yaml index 355e63795..e7e65906d 100644 --- a/promptsource/templates/wiki_bio/templates.yaml +++ b/promptsource/templates/wiki_bio/templates.yaml @@ -22,6 +22,8 @@ templates: {{target_text}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -43,6 +45,8 @@ templates: content\"][n] }}\n{% endif %}\n{% endfor %}\n" metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: false @@ -58,6 +62,8 @@ templates: _\",\" \") }} \n{% endif %}\n{% endfor %}" metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: false @@ -86,6 +92,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: false @@ -112,6 +120,8 @@ templates: {% endfor %}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: false diff --git a/promptsource/templates/wiki_hop/masked/templates.yaml b/promptsource/templates/wiki_hop/masked/templates.yaml index ce6da0a71..1af4f7933 100644 --- a/promptsource/templates/wiki_hop/masked/templates.yaml +++ b/promptsource/templates/wiki_hop/masked/templates.yaml @@ -52,6 +52,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Accuracy original_task: false @@ -76,6 +78,8 @@ templates: {{ question_split[0] | replace("_", " ") }}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: false @@ -101,6 +105,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Accuracy original_task: false @@ -115,6 +121,8 @@ templates: \ \n- {{ candidates | join(\"\\n- \") }}\n|||\n{{answer}}" metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Accuracy original_task: true @@ -140,6 +148,8 @@ templates: ") }}, {{answer}})' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: false @@ -163,6 +173,8 @@ templates: {{ question_split[1:] | 
join(" ")}}' metadata: !TemplateMetadata choices_in_prompt: null + languages: + - en metrics: - Other original_task: false diff --git a/promptsource/templates/wiki_hop/original/templates.yaml b/promptsource/templates/wiki_hop/original/templates.yaml index f91d7070b..f1f620a3a 100644 --- a/promptsource/templates/wiki_hop/original/templates.yaml +++ b/promptsource/templates/wiki_hop/original/templates.yaml @@ -11,6 +11,8 @@ templates: \ | join(\"\\n - \") }}\n\n|||\n{{answer}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -40,6 +42,8 @@ templates: {{ question_split[0] | replace("_", " ") }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -69,6 +73,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -98,6 +104,8 @@ templates: {{ question_split[1:] | join(" ")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -132,6 +140,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -168,6 +178,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -197,6 +209,8 @@ templates: {{ question_split[1:] | join(" ") }} , {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false @@ -231,6 +245,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -265,6 +281,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/wiki_qa/templates.yaml b/promptsource/templates/wiki_qa/templates.yaml index 0280936a1..6cea619fc 100644 --- a/promptsource/templates/wiki_qa/templates.yaml +++ b/promptsource/templates/wiki_qa/templates.yaml @@ -10,6 +10,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -31,6 +33,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -50,6 +54,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -72,6 +78,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -93,6 +101,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -113,6 +123,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -134,6 +146,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -156,6 +170,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -176,6 +192,8 @@ templates: {{answer_choices[label]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true 
@@ -199,6 +217,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -217,6 +237,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/wiki_split/templates.yaml b/promptsource/templates/wiki_split/templates.yaml index a542cda18..8a4ede350 100644 --- a/promptsource/templates/wiki_split/templates.yaml +++ b/promptsource/templates/wiki_split/templates.yaml @@ -15,6 +15,8 @@ templates: '''' ","")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true @@ -29,6 +31,8 @@ templates: ' '' \",\"\")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true @@ -44,6 +48,8 @@ templates: '''' ","")}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true @@ -59,6 +65,8 @@ templates: ' '' \",\"\")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true @@ -74,6 +82,8 @@ templates: ' '' \",\"\")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: true @@ -91,6 +101,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: false @@ -104,6 +116,8 @@ templates: \ sentence. \n|||\n{{complex_sentence|replace(\"' '' \",\"\")}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU original_task: false diff --git a/promptsource/templates/wino_bias/type1_anti/templates.yaml b/promptsource/templates/wino_bias/type1_anti/templates.yaml index 936e1c0a8..74c01a738 100644 --- a/promptsource/templates/wino_bias/type1_anti/templates.yaml +++ b/promptsource/templates/wino_bias/type1_anti/templates.yaml @@ -16,6 +16,8 @@ templates: {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -35,6 +37,8 @@ templates: {{tokens | join(" ")}} ||| {{referent}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -54,6 +58,8 @@ templates: {{tokens | join(" ")}} ||| {{referent}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -74,6 +80,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -93,6 +101,8 @@ templates: Here, what does "{{ pronoun }}" stand for? 
||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -112,6 +122,8 @@ templates: Here, by "{{ pronoun }}" they mean ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -141,6 +153,8 @@ templates: Answer: ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/wino_bias/type1_pro/templates.yaml b/promptsource/templates/wino_bias/type1_pro/templates.yaml index 50549e1b6..7cd5bae41 100644 --- a/promptsource/templates/wino_bias/type1_pro/templates.yaml +++ b/promptsource/templates/wino_bias/type1_pro/templates.yaml @@ -15,6 +15,8 @@ templates: {{tokens | join(" ")}} ||| {{referent}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -34,6 +36,8 @@ templates: {{tokens | join(" ")}} ||| {{referent}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -54,6 +58,8 @@ templates: {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -83,6 +89,8 @@ templates: Answer: ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -102,6 +110,8 @@ templates: Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -122,6 +132,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -141,6 +153,8 @@ templates: Here, by "{{ pronoun }}" they mean ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/wino_bias/type2_anti/templates.yaml b/promptsource/templates/wino_bias/type2_anti/templates.yaml index 221142a07..1309c445b 100644 --- a/promptsource/templates/wino_bias/type2_anti/templates.yaml +++ b/promptsource/templates/wino_bias/type2_anti/templates.yaml @@ -15,6 +15,8 @@ templates: {{tokens | join(" ")}} ||| {{referent}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -35,6 +37,8 @@ templates: {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -54,6 +58,8 @@ templates: Here, what does "{{ pronoun }}" stand for? 
||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -73,6 +79,8 @@ templates: {{tokens | join(" ")}} ||| {{referent}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -102,6 +110,8 @@ templates: Answer: ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -121,6 +131,8 @@ templates: Here, by "{{ pronoun }}" they mean ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -141,6 +153,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/wino_bias/type2_pro/templates.yaml b/promptsource/templates/wino_bias/type2_pro/templates.yaml index 298f712d4..39b239865 100644 --- a/promptsource/templates/wino_bias/type2_pro/templates.yaml +++ b/promptsource/templates/wino_bias/type2_pro/templates.yaml @@ -25,6 +25,8 @@ templates: Answer: ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -44,6 +46,8 @@ templates: {{tokens | join(" ")}} ||| {{referent}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -63,6 +67,8 @@ templates: {{tokens | join(" ")}} ||| {{referent}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -83,6 +89,8 @@ templates: {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -102,6 +110,8 @@ templates: Here, by "{{ pronoun }}" they mean ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -121,6 +131,8 @@ templates: Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -141,6 +153,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true diff --git a/promptsource/templates/winograd_wsc/wsc273/templates.yaml b/promptsource/templates/winograd_wsc/wsc273/templates.yaml index 41193b5c4..bafc72483 100644 --- a/promptsource/templates/winograd_wsc/wsc273/templates.yaml +++ b/promptsource/templates/winograd_wsc/wsc273/templates.yaml @@ -8,6 +8,8 @@ templates: or {{ answer_choices[1] }}? 
||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -23,6 +25,8 @@ templates: {% endif %}\nAnswer: ||| {{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -36,6 +40,8 @@ templates: answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -49,6 +55,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -64,6 +72,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +87,8 @@ templates: \ }}?\n\nAnswer: ||| {{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -90,6 +102,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -102,6 +116,8 @@ templates: }}" or "{{ answer_choices[1]}}"? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/winograd_wsc/wsc285/templates.yaml b/promptsource/templates/winograd_wsc/wsc285/templates.yaml index dcd70937d..f0607a70e 100644 --- a/promptsource/templates/winograd_wsc/wsc285/templates.yaml +++ b/promptsource/templates/winograd_wsc/wsc285/templates.yaml @@ -8,6 +8,8 @@ templates: or {{ answer_choices[1] }}? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -23,6 +25,8 @@ templates: {% endif %}\nAnswer: ||| {{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -36,6 +40,8 @@ templates: answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -49,6 +55,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -64,6 +72,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -77,6 +87,8 @@ templates: \ }}?\n\nAnswer: ||| {{ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -90,6 +102,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -102,6 +116,8 @@ templates: }}" or "{{ answer_choices[1]}}"? 
||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/winogrande/winogrande_debiased/templates.yaml b/promptsource/templates/winogrande/winogrande_debiased/templates.yaml index ed8050daf..633ffb623 100644 --- a/promptsource/templates/winogrande/winogrande_debiased/templates.yaml +++ b/promptsource/templates/winogrande/winogrande_debiased/templates.yaml @@ -9,6 +9,8 @@ templates: \ else %} {{ option2 }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -24,6 +26,8 @@ templates: %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -37,6 +41,8 @@ templates: }} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +58,8 @@ templates: {{answer_choices[answer | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +75,8 @@ templates: {{answer_choices[answer|int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -91,6 +101,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/winogrande/winogrande_l/templates.yaml b/promptsource/templates/winogrande/winogrande_l/templates.yaml index 5c105a3fb..e2ef50fa4 100644 --- a/promptsource/templates/winogrande/winogrande_l/templates.yaml +++ b/promptsource/templates/winogrande/winogrande_l/templates.yaml @@ -11,6 +11,8 @@ templates: {{answer_choices[answer|int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -35,6 +37,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -48,6 +52,8 @@ templates: }} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +67,8 @@ templates: \ else %} {{ option2 }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -76,6 +84,8 @@ templates: {{answer_choices[answer | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +101,8 @@ templates: %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/winogrande/winogrande_m/templates.yaml b/promptsource/templates/winogrande/winogrande_m/templates.yaml index cb7b4f25b..380e09b93 100644 --- a/promptsource/templates/winogrande/winogrande_m/templates.yaml +++ b/promptsource/templates/winogrande/winogrande_m/templates.yaml @@ -9,6 +9,8 @@ templates: \ else %} {{ option2 }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -33,6 +35,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -48,6 +52,8 @@ templates: {{answer_choices[answer|int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - 
Accuracy original_task: false @@ -61,6 +67,8 @@ templates: }} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -76,6 +84,8 @@ templates: %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +101,8 @@ templates: {{answer_choices[answer | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/winogrande/winogrande_s/templates.yaml b/promptsource/templates/winogrande/winogrande_s/templates.yaml index 7940a9924..496cb1905 100644 --- a/promptsource/templates/winogrande/winogrande_s/templates.yaml +++ b/promptsource/templates/winogrande/winogrande_s/templates.yaml @@ -9,6 +9,8 @@ templates: }} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -24,6 +26,8 @@ templates: {{answer_choices[answer|int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -39,6 +43,8 @@ templates: {{answer_choices[answer | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +58,8 @@ templates: \ else %} {{ option2 }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -76,6 +84,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +101,8 @@ templates: %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/winogrande/winogrande_xl/templates.yaml b/promptsource/templates/winogrande/winogrande_xl/templates.yaml index c47004153..11642583a 100644 --- a/promptsource/templates/winogrande/winogrande_xl/templates.yaml +++ b/promptsource/templates/winogrande/winogrande_xl/templates.yaml @@ -9,6 +9,8 @@ templates: }} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -24,6 +26,8 @@ templates: {{answer_choices[answer | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -39,6 +43,8 @@ templates: %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -63,6 +69,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -78,6 +86,8 @@ templates: {{answer_choices[answer|int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -91,6 +101,8 @@ templates: \ else %} {{ option2 }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/winogrande/winogrande_xs/templates.yaml b/promptsource/templates/winogrande/winogrande_xs/templates.yaml index dfefa4fc7..074a1476b 100644 --- a/promptsource/templates/winogrande/winogrande_xs/templates.yaml +++ b/promptsource/templates/winogrande/winogrande_xs/templates.yaml @@ -9,6 +9,8 @@ templates: }} {% endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - 
Accuracy original_task: true @@ -24,6 +26,8 @@ templates: {{answer_choices[answer|int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -39,6 +43,8 @@ templates: %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -54,6 +60,8 @@ templates: {{answer_choices[answer | int - 1]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -67,6 +75,8 @@ templates: \ else %} {{ option2 }} {% endif %}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -91,6 +101,8 @@ templates: endif %}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/wiqa/templates.yaml b/promptsource/templates/wiqa/templates.yaml index fbcf818b9..66ec559a8 100644 --- a/promptsource/templates/wiqa/templates.yaml +++ b/promptsource/templates/wiqa/templates.yaml @@ -17,6 +17,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -45,6 +47,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -66,6 +70,8 @@ templates: {{ question_para_step | first }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -91,6 +97,8 @@ templates: ' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -120,13 +128,16 @@ templates: {{answer_label|replace("_", " ")}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true name: effect_with_string_answer reference: '' 667c291f-6a36-4334-aa49-804c9e72500b: !Template - answer_choices: 'indirectly impacting a step of the process ||| not impacting any step of the process' + answer_choices: indirectly impacting a step of the process ||| not impacting any + step of the process id: 667c291f-6a36-4334-aa49-804c9e72500b jinja: 'Process: @@ -156,13 +167,15 @@ templates: a step of the process"}[metadata_question_type]}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false name: which_of_the_following_is_the_supposed_perturbation reference: '' 6cf2b300-6790-4120-9592-9db63bec221b: !Template - answer_choices: 'A ||| B ||| C' + answer_choices: A ||| B ||| C id: 6cf2b300-6790-4120-9592-9db63bec221b jinja: 'Process: @@ -187,13 +200,15 @@ templates: {{answer_label_as_choice}}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true name: effect_with_label_answer reference: '' a17313bd-94bb-47ab-82bf-538df1b1ad5f: !Template - answer_choices: 'yes ||| no' + answer_choices: yes ||| no id: a17313bd-94bb-47ab-82bf-538df1b1ad5f jinja: 'Process: @@ -216,6 +231,8 @@ templates: "yes"}[metadata_question_type]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: false diff --git a/promptsource/templates/xnli/en/templates.yaml b/promptsource/templates/xnli/en/templates.yaml index 6b7da0d79..794f97416 100644 --- a/promptsource/templates/xnli/en/templates.yaml +++ b/promptsource/templates/xnli/en/templates.yaml @@ -10,6 +10,8 @@ templates: {{"inconclusive"}}? 
||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -22,6 +24,8 @@ templates: \ no, or maybe? ||| {{answer_choices[label]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -36,6 +40,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -49,6 +55,8 @@ templates: ||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -61,6 +69,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -73,6 +83,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -85,6 +97,8 @@ templates: no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -97,6 +111,8 @@ templates: Yes, no, or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -109,6 +125,8 @@ templates: ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -121,6 +139,8 @@ templates: or maybe? ||| {{ answer_choices[label] }} ' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -134,6 +154,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -147,6 +169,8 @@ templates: \ answer_choices[label] }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -159,6 +183,8 @@ templates: {{"sometimes"}}, or {{"never"}} true? 
||| {{ answer_choices[label] }} metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -172,6 +198,8 @@ templates: \ }}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -185,6 +213,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/xquad/xquad.en/templates.yaml b/promptsource/templates/xquad/xquad.en/templates.yaml index b547d3a8a..56cea06bc 100644 --- a/promptsource/templates/xquad/xquad.en/templates.yaml +++ b/promptsource/templates/xquad/xquad.en/templates.yaml @@ -10,6 +10,8 @@ templates: Generate a question from the above passage : ||| {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -29,6 +31,8 @@ templates: ||| {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -41,6 +45,8 @@ templates: \n{{answers.text[0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -56,6 +62,8 @@ templates: as the answer would be: ||| {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -78,6 +86,8 @@ templates: {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -95,6 +105,8 @@ templates: A: ||| {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/xquad_r/en/templates.yaml b/promptsource/templates/xquad_r/en/templates.yaml index 28cf13e83..a16091398 100644 --- a/promptsource/templates/xquad_r/en/templates.yaml +++ b/promptsource/templates/xquad_r/en/templates.yaml @@ -8,6 +8,8 @@ templates: \n{{answers.text[0]}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -29,6 +31,8 @@ templates: {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -47,6 +51,8 @@ templates: ||| {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true @@ -61,6 +67,8 @@ templates: Generate a question from the above passage : ||| {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -77,6 +85,8 @@ templates: as the answer would be: ||| {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -95,6 +105,8 @@ templates: A: ||| {{answers["text"][0]}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Squad original_task: true diff --git a/promptsource/templates/xsum/templates.yaml b/promptsource/templates/xsum/templates.yaml index f2143ed2c..568304510 100644 --- a/promptsource/templates/xsum/templates.yaml +++ b/promptsource/templates/xsum/templates.yaml @@ -12,6 +12,8 @@ templates: Write a summary of the text above : ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -27,6 +29,8 @@ templates: Summary: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + 
languages: + - en metrics: - ROUGE - BLEU @@ -41,6 +45,8 @@ templates: How would you rephrase that in a few words? ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -59,6 +65,8 @@ templates: So I recapped it in layman''s terms: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -73,6 +81,8 @@ templates: This boils down to the simple idea that ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -87,6 +97,8 @@ templates: {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -101,6 +113,8 @@ templates: Summary: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -119,6 +133,8 @@ templates: Given the above document, write one sentence to summarize: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -137,6 +153,8 @@ templates: Now, can you write me an extremely short abstract for it? ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU @@ -152,6 +170,8 @@ templates: TL;DR: ||| {{summary}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - ROUGE - BLEU diff --git a/promptsource/templates/yahoo_answers_qa/templates.yaml b/promptsource/templates/yahoo_answers_qa/templates.yaml index 7b306017f..d68d9da5e 100644 --- a/promptsource/templates/yahoo_answers_qa/templates.yaml +++ b/promptsource/templates/yahoo_answers_qa/templates.yaml @@ -6,6 +6,8 @@ templates: jinja: "Answer the following question: \n\n{{question}} |||\n{{nbestanswers|choice}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -21,6 +23,8 @@ templates: \ |join(', ')}}.\n\n{{question}} |||\n{{main_category}}\n" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: false @@ -35,6 +39,8 @@ templates: {{question}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -48,6 +54,8 @@ templates: {{nbestanswers|choice}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -62,6 +70,8 @@ templates: \ related to {{main_category}}. 
\n\n|||\n{{nbestanswers|choice}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -80,6 +90,8 @@ templates: {{answer}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -94,6 +106,8 @@ templates: \ \n\n|||\n{{nbestanswers|choice}}" metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE diff --git a/promptsource/templates/yahoo_answers_topics/templates.yaml b/promptsource/templates/yahoo_answers_topics/templates.yaml index d57d9244c..2dc36eb92 100644 --- a/promptsource/templates/yahoo_answers_topics/templates.yaml +++ b/promptsource/templates/yahoo_answers_topics/templates.yaml @@ -11,6 +11,8 @@ templates: {{ question_title}}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -27,6 +29,8 @@ templates: \ and passage: \n- {{answer_choices|join('\\n- ')}}\n|||\n{{ answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -46,6 +50,8 @@ templates: {{ best_answer }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - BLEU - ROUGE @@ -63,6 +69,8 @@ templates: \nAnswer: {{best_answer}}\n|||\n{{ answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +87,8 @@ templates: \ \n||| \n{{ answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -94,6 +104,8 @@ templates: \ pair from the list of topics: {{answer_choices|join(', ')}}.\n|||\n{{ answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +122,8 @@ templates: \ \n\n|||\n{{ answer_choices[topic]}}" metadata: !TemplateMetadata choices_in_prompt: true + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/yelp_polarity/templates.yaml b/promptsource/templates/yelp_polarity/templates.yaml index cf4b7ba80..45331a64b 100644 --- a/promptsource/templates/yelp_polarity/templates.yaml +++ b/promptsource/templates/yelp_polarity/templates.yaml @@ -8,6 +8,8 @@ templates: Overall, the experience is ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -21,6 +23,8 @@ templates: Based on that, my rating for this place is ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -34,6 +38,8 @@ templates: Did I regret it? 
||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -48,6 +54,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -66,6 +74,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -79,6 +89,8 @@ templates: That being said, I ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -97,6 +109,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -110,6 +124,8 @@ templates: In a nutshell, this place is ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -124,6 +140,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/yelp_review_full/templates.yaml b/promptsource/templates/yelp_review_full/templates.yaml index 7ac17c431..84c027c17 100644 --- a/promptsource/templates/yelp_review_full/templates.yaml +++ b/promptsource/templates/yelp_review_full/templates.yaml @@ -8,6 +8,8 @@ templates: So I would like to give it ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -23,6 +25,8 @@ templates: Based on that, my rating is ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -41,6 +45,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -52,6 +58,8 @@ templates: jinja: '{{ text }} My rating for this place is ||| {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -70,6 +78,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -84,6 +94,8 @@ templates: }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true @@ -102,6 +114,8 @@ templates: {{ answer_choices[label] }}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Accuracy original_task: true diff --git a/promptsource/templates/zest/templates.yaml b/promptsource/templates/zest/templates.yaml index 157a0284b..5d2d2906f 100644 --- a/promptsource/templates/zest/templates.yaml +++ b/promptsource/templates/zest/templates.yaml @@ -22,6 +22,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -53,6 +55,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: false @@ -86,6 +90,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -117,6 +123,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - 
en metrics: - Other original_task: true @@ -153,6 +161,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true @@ -182,6 +192,8 @@ templates: {% endif %}' metadata: !TemplateMetadata choices_in_prompt: false + languages: + - en metrics: - Other original_task: true
diff --git a/promptsource/utils.py b/promptsource/utils.py
index 4c19aa7cd..c8ae32bde 100644
--- a/promptsource/utils.py
+++ b/promptsource/utils.py
@@ -46,33 +46,25 @@ def get_dataset_builder(path, conf=None):
 
 def get_dataset(path, conf=None):
     "Get a dataset from name and conf."
-    builder_instance = get_dataset_builder(path, conf)
-    if builder_instance.manual_download_instructions is None and builder_instance.info.size_in_bytes is not None:
-        builder_instance.download_and_prepare()
-        return builder_instance.as_dataset()
-    else:
-        return load_dataset(path, conf)
-
-
-def load_dataset(dataset_name, subset_name):
     try:
-        return datasets.load_dataset(dataset_name, subset_name)
+        return datasets.load_dataset(path, conf)
     except datasets.builder.ManualDownloadError:
         cache_root_dir = (
             os.environ["PROMPTSOURCE_MANUAL_DATASET_DIR"]
             if "PROMPTSOURCE_MANUAL_DATASET_DIR" in os.environ
             else DEFAULT_PROMPTSOURCE_CACHE_HOME
         )
-        data_dir = (
-            f"{cache_root_dir}/{dataset_name}"
-            if subset_name is None
-            else f"{cache_root_dir}/{dataset_name}/{subset_name}"
-        )
-        return datasets.load_dataset(
-            dataset_name,
-            subset_name,
-            data_dir=data_dir,
-        )
+        data_dir = f"{cache_root_dir}/{path}" if conf is None else f"{cache_root_dir}/{path}/{conf}"
+        try:
+            return datasets.load_dataset(
+                path,
+                conf,
+                data_dir=data_dir,
+            )
+        except Exception as err:
+            raise err
+    except Exception as err:
+        raise err
 
 
 def get_dataset_confs(path):
@@ -132,9 +124,9 @@ def filter_english_datasets():
             continue
         metadata = dataset["cardData"]
 
-        if "languages" not in metadata:
+        if "language" not in metadata:
             continue
-        languages = metadata["languages"]
+        languages = metadata["language"]
         if "en" in languages or "en-US" in languages:
             english_datasets.append(dataset_name)
 
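
The repeated template edits above all add a languages field to each prompt's TemplateMetadata, while filter_english_datasets (last hunk) now reads the Hub card's "language" key instead of "languages". As a minimal sketch of how the new per-prompt field might be consumed (assuming DatasetTemplates and the metadata attributes behave as the YAML above suggests, and using winogrande/winogrande_xl purely as an example), one could filter prompts by prompt language like this:

# Illustrative sketch only: assumes DatasetTemplates exposes the templates
# shown in the YAML diffs above and that TemplateMetadata now carries the
# new `languages` list (e.g. ["en"]).
from promptsource.templates import DatasetTemplates

# Load the prompts for one WinoGrande configuration (example names).
prompts = DatasetTemplates("winogrande", "winogrande_xl")

# Keep only the templates whose prompt text is written in English.
english_prompts = [
    name
    for name in prompts.all_template_names
    if "en" in (prompts[name].metadata.languages or [])
]
print(english_prompts)

Note that this per-prompt language tag is deliberately independent of the dataset-level filtering in filter_english_datasets: the former describes the language of the prompt text itself, the latter the language declared on the dataset card.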