diff --git a/README.md b/README.md
index deb2c4a4d..04484d22e 100644
--- a/README.md
+++ b/README.md
@@ -56,7 +56,7 @@ You can apply prompts to examples from datasets of the [Hugging Face Datasets li
 INPUT: What label best describes this news article? Carlyle Looks Toward Commercial Aerospace (Reuters) Reuters - Private investment firm Carlyle Group,\which has a reputation for making well-timed and occasionally\controversial plays in the defense industry, has quietly placed\its bets on another part of the market.
 >>> print("TARGET: ", result[1])
-TARGET: Business
+TARGET: ['Business']
 ```
 
 In the case that you are looking for the prompts available for a particular subset of a dataset, you should use the following syntax:
diff --git a/promptsource/templates.py b/promptsource/templates.py
index 2b9eb341e..789e91cc8 100644
--- a/promptsource/templates.py
+++ b/promptsource/templates.py
@@ -27,7 +27,18 @@
 # These are users whose datasets should be included in the results returned by
 # filter_english_datasets (regardless of their metadata)
-INCLUDED_USERS = {"Zaid", "craffel", "GEM", "aps", "khalidalt", "shanya", "rbawden", "BigScienceBiasEval", "gsarti"}
+INCLUDED_USERS = {
+    "Zaid",
+    "craffel",
+    "GEM",
+    "aps",
+    "khalidalt",
+    "shanya",
+    "rbawden",
+    "BigScienceBiasEval",
+    "gsarti",
+    "Muennighoff",
+}
 
 # These are the metrics with which templates can be tagged
 METRICS = {
diff --git a/promptsource/templates/BigScienceBiasEval/crows_pairs_multilingual/english/templates.yaml b/promptsource/templates/BigScienceBiasEval/crows_pairs_multilingual/english/templates.yaml
index 48ff2afb6..27be3b1c2 100644
--- a/promptsource/templates/BigScienceBiasEval/crows_pairs_multilingual/english/templates.yaml
+++ b/promptsource/templates/BigScienceBiasEval/crows_pairs_multilingual/english/templates.yaml
@@ -17,9 +17,9 @@ templates:
       original_task: true
     name: '1'
     reference: ''
-  2eac2f74-ec50-4ca4-9124-1fdaad7b10b8: !Template
+  2eac2f74-ec50-4ca4-9124-1fdaad7b10b7: !Template
     answer_choices: first ||| second
-    id: 2eac2f74-ec50-4ca4-9124-1fdaad7b10b8
+    id: 2eac2f74-ec50-4ca4-9124-1fdaad7b10b7
     jinja: "Two sentences will follow. Is the first or second sentence more likely\
       \ to be true? {% set shuffled_order = [0, 1] | random %} {% if shuffled_order\
       \ == 0 %} \n1: {{sent_more}} \n2: {{sent_less}} {% else %} \n1: {{sent_less}}\
@@ -49,9 +49,9 @@ templates:
       original_task: true
     name: A_stereotype_true
     reference: ''
-  5b9ff951-8625-4a9b-b45e-acea13104726: !Template
+  5b9ff951-8625-4a9b-b45e-acea13104725: !Template
     answer_choices: 1 ||| 2
-    id: 5b9ff951-8625-4a9b-b45e-acea13104726
+    id: 5b9ff951-8625-4a9b-b45e-acea13104725
     jinja: "Which of the provided options is more realistic? {% set shuffled_order\
       \ = [0, 1] | random %} {% if shuffled_order == 0 %} \n1: {{sent_more}} \n2:\
       \ {{sent_less}} {% else %} \n1: {{sent_less}} \n2: {{sent_more}} {% endif %}\
diff --git a/promptsource/templates/BigScienceBiasEval/crows_pairs_multilingual/french/templates.yaml b/promptsource/templates/BigScienceBiasEval/crows_pairs_multilingual/french/templates.yaml
index 952886207..190e98970 100644
--- a/promptsource/templates/BigScienceBiasEval/crows_pairs_multilingual/french/templates.yaml
+++ b/promptsource/templates/BigScienceBiasEval/crows_pairs_multilingual/french/templates.yaml
@@ -64,9 +64,9 @@ templates:
       original_task: false
     name: A_stereotype_true_fr
     reference: ''
-  939ebb9f-afe2-4b2a-af40-adf943374da8: !Template
+  939ebb9f-afe2-4b2a-af40-adf943374da7: !Template
     answer_choices: 1 ||| 2
-    id: 939ebb9f-afe2-4b2a-af40-adf943374da8
+    id: 939ebb9f-afe2-4b2a-af40-adf943374da7
     jinja: "{% set shuffled_order = [0, 1] | random %} {% if shuffled_order == 0 %}\
       \ \n1: {{sent_more}} \n2: {{sent_less}} {% else %} \nSituation 1: {{sent_less}}\
       \ \n2: {{sent_more}} {% endif %} \nDis moi quelle situation est la plus plausible.\
diff --git a/promptsource/templates/Muennighoff/xwinograd/en/templates.yaml b/promptsource/templates/Muennighoff/xwinograd/en/templates.yaml
new file mode 100644
index 000000000..f0f34dccb
--- /dev/null
+++ b/promptsource/templates/Muennighoff/xwinograd/en/templates.yaml
@@ -0,0 +1,110 @@
+dataset: Muennighoff/xwinograd
+subset: en
+templates:
+  28d31908-4aee-4545-aff2-7528cbf39197: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 28d31908-4aee-4545-aff2-7528cbf39197
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  50ce5113-882f-4a9d-b21d-8d98b4644295: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 50ce5113-882f-4a9d-b21d-8d98b4644295
+    jinja: 'Fill in the _ in the below sentence:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
+  7f0f6d33-25e2-4394-b1f0-49a2a54767aa: !Template
+    answer_choices: True ||| False
+    id: 7f0f6d33-25e2-4394-b1f0-49a2a54767aa
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  80f9679e-7b6c-4ee7-a348-e905ed9aaf9e: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 80f9679e-7b6c-4ee7-a348-e905ed9aaf9e
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  bd40cf1f-bda2-4757-b1b5-f1a20a3f7202: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: bd40cf1f-bda2-4757-b1b5-f1a20a3f7202
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
+  ec365d5d-bb5c-488c-93a0-4f90e6011c5d: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: ec365d5d-bb5c-488c-93a0-4f90e6011c5d
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
diff --git a/promptsource/templates/Muennighoff/xwinograd/fr/templates.yaml b/promptsource/templates/Muennighoff/xwinograd/fr/templates.yaml
new file mode 100644
index 000000000..5c038558c
--- /dev/null
+++ b/promptsource/templates/Muennighoff/xwinograd/fr/templates.yaml
@@ -0,0 +1,110 @@
+dataset: Muennighoff/xwinograd
+subset: fr
+templates:
+  38d31908-4aee-4545-aff2-7528cbf39197: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 38d31908-4aee-4545-aff2-7528cbf39197
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  60ce5113-882f-4a9d-b21d-8d98b4644295: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 60ce5113-882f-4a9d-b21d-8d98b4644295
+    jinja: 'Fill in the _ in the below sentence:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
+  8f0f6d33-25e2-4394-b1f0-49a2a54767aa: !Template
+    answer_choices: True ||| False
+    id: 8f0f6d33-25e2-4394-b1f0-49a2a54767aa
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  90f9679e-7b6c-4ee7-a348-e905ed9aaf9e: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 90f9679e-7b6c-4ee7-a348-e905ed9aaf9e
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  cd40cf1f-bda2-4757-b1b5-f1a20a3f7202: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: cd40cf1f-bda2-4757-b1b5-f1a20a3f7202
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
+  fc365d5d-bb5c-488c-93a0-4f90e6011c5d: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: fc365d5d-bb5c-488c-93a0-4f90e6011c5d
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
diff --git a/promptsource/templates/Muennighoff/xwinograd/pt/templates.yaml b/promptsource/templates/Muennighoff/xwinograd/pt/templates.yaml
new file mode 100644
index 000000000..5c038558c
--- /dev/null
+++ b/promptsource/templates/Muennighoff/xwinograd/pt/templates.yaml
@@ -0,0 +1,110 @@
+dataset: Muennighoff/xwinograd
+subset: pt
+templates:
+  38d31908-4aee-4545-aff2-7528cbf39197: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 38d31908-4aee-4545-aff2-7528cbf39197
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  60ce5113-882f-4a9d-b21d-8d98b4644295: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 60ce5113-882f-4a9d-b21d-8d98b4644295
+    jinja: 'Fill in the _ in the below sentence:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
+  8f0f6d33-25e2-4394-b1f0-49a2a54767aa: !Template
+    answer_choices: True ||| False
+    id: 8f0f6d33-25e2-4394-b1f0-49a2a54767aa
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  90f9679e-7b6c-4ee7-a348-e905ed9aaf9e: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 90f9679e-7b6c-4ee7-a348-e905ed9aaf9e
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  cd40cf1f-bda2-4757-b1b5-f1a20a3f7202: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: cd40cf1f-bda2-4757-b1b5-f1a20a3f7202
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
+  fc365d5d-bb5c-488c-93a0-4f90e6011c5d: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: fc365d5d-bb5c-488c-93a0-4f90e6011c5d
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
diff --git a/promptsource/templates/Muennighoff/xwinograd/zh/templates.yaml b/promptsource/templates/Muennighoff/xwinograd/zh/templates.yaml
new file mode 100644
index 000000000..c911d8cb2
--- /dev/null
+++ b/promptsource/templates/Muennighoff/xwinograd/zh/templates.yaml
@@ -0,0 +1,110 @@
+dataset: Muennighoff/xwinograd
+subset: zh
+templates:
+  2e10b0b9-b57e-4bd0-b8cc-cbdd5dcd7551: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 2e10b0b9-b57e-4bd0-b8cc-cbdd5dcd7551
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  33f50367-0615-45a0-883a-05b6ff8c0e2f: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 33f50367-0615-45a0-883a-05b6ff8c0e2f
+    jinja: 'Fill in the _ in the below sentence:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
+  37767236-f6e6-4a57-9deb-c0a4c073e56d: !Template
+    answer_choices: True ||| False
+    id: 37767236-f6e6-4a57-9deb-c0a4c073e56d
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  63f2ab14-d78b-4e83-a3f9-186cb6f220c0: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 63f2ab14-d78b-4e83-a3f9-186cb6f220c0
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  aec099a7-da35-4438-aa7c-250651959048: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: aec099a7-da35-4438-aa7c-250651959048
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
+  fa9d56aa-b236-44f9-92c1-595784f57243: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: fa9d56aa-b236-44f9-92c1-595784f57243
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
diff --git a/promptsource/templates/gsarti/flores_101/all/templates.yaml b/promptsource/templates/gsarti/flores_101/all/templates.yaml
index b27f3a883..2a7ee27c5 100644
--- a/promptsource/templates/gsarti/flores_101/all/templates.yaml
+++ b/promptsource/templates/gsarti/flores_101/all/templates.yaml
@@ -902,14 +902,6 @@ templates:
     metadata: *id001
     name: translate-this-xho-ben
     reference: Translate this from X to Y (Xhosa into Bengali)
-  1be26707-e89a-442d-9b58-7a3a44807239: !Template
-    answer_choices: null
-    id: 1be26707-e89a-442d-9b58-7a3a44807239
-    jinja: 'Translate this from Swahili into English: {{ sentence_swh }} ||| {{ sentence_eng
-      }}'
-    metadata: *id001
-    name: translate-this-swh-eng
-    reference: Basic translate (Swahili into English)
   1c026e1a-edea-40f4-b345-792eee944933: !Template
     answer_choices: null
     id: 1c026e1a-edea-40f4-b345-792eee944933
diff --git a/promptsource/templates/xcopa/id/templates.yaml b/promptsource/templates/xcopa/id/templates.yaml
index 8f78da924..8da851623 100644
--- a/promptsource/templates/xcopa/id/templates.yaml
+++ b/promptsource/templates/xcopa/id/templates.yaml
@@ -5,7 +5,7 @@ templates:
     answer_choices: '{{choice1}} ||| {{choice2}}'
     id: 1a87b487-1570-4873-aed9-b84d2fc0476c
     jinja: "{{ premise }} \n\nI am hesitating between two options. Help me choose\
-      \ the more likely {% if question == \"cause\" %} cause: {% else %} effect: {%\
+      \ the more likely {% if question == \"cause\" %}cause: {% else %}effect: {%\
       \ endif %}\n- {{choice1}}\n- {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label]\
       \ }}{%endif%}"
     metadata: !TemplateMetadata
@@ -36,7 +36,7 @@ templates:
     answer_choices: '{{choice1}} ||| {{choice2}}'
     id: 482f0b87-e748-4e98-8cc8-a23386bc50c3
     jinja: "{{ premise }} \n\nWhat's the best option?\n- {{choice1}}\n- {{choice2}}\n\
-      \nWe are looking for {% if question == \"cause\" %} a cause {% else %} an effect\
+      \nWe are looking for {% if question == \"cause\" %}a cause {% else %}an effect\
       \ {% endif %}\n||| {% if label != -1 %}{{answer_choices[label]}}{%endif%}"
     metadata: !TemplateMetadata
@@ -50,7 +50,7 @@ templates:
   4a0640a5-c378-422d-879b-7490bc500c8a: !Template
     answer_choices: '{{choice1}} ||| {{choice2}}'
     id: 4a0640a5-c378-422d-879b-7490bc500c8a
-    jinja: '{{ premise }} {% if question == "cause" %} because... {% else %} so...
+    jinja: '{{ premise }} {% if question == "cause" %}because... {% else %}so...
 
       {% endif %}
 
       Choose between:
 
       - {{choice1}}
@@ -144,7 +144,7 @@ templates:
     jinja: '{{ premise }}
 
-      Select the most plausible {% if question == "cause" %} cause: {% else %} effect:
+      Select the most plausible {% if question == "cause" %}cause: {% else %}effect:
       {% endif %}
 
       - {{choice1}}
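---

To sanity-check the new xwinograd templates locally, here is a minimal sketch mirroring the README usage excerpted at the top of this diff. It relies on the `DatasetTemplates(f"{dataset_name}/{subset_name}")` subset syntax the README describes; the example dict is hypothetical, following the schema these templates assume (`sentence`, `option1`, `option2`, and a string-valued `answer` of `'1'` or `'2'`).

```python
from promptsource.templates import DatasetTemplates

# Community datasets are namespaced by user, so the dataset name itself
# contains a slash; the subset is appended with another slash, per the README.
xwinograd_en = DatasetTemplates("Muennighoff/xwinograd/en")

# Hypothetical example following the xwinograd schema assumed above.
# `answer` is the string "1" or "2", matching the `answer == '1'` checks
# in the templates.
example = {
    "sentence": "The trophy doesn't fit in the suitcase because _ is too big.",
    "option1": "the trophy",
    "option2": "the suitcase",
    "answer": "1",
}

# Look up one of the templates added in this PR by its `name` field,
# then render the (input, target) pair.
prompt = xwinograd_en["does underscore refer to"]
result = prompt.apply(example)
print("INPUT: ", result[0])
print("TARGET: ", result[1])
```

With `answer` set to `"1"`, `TARGET` should come back as `['the trophy']`, consistent with the list-valued targets shown in the README change above.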