diff --git a/model_cards/allenai/wmt19-de-en-6-6-base/README.md b/model_cards/allenai/wmt19-de-en-6-6-base/README.md
index 303a11cb6590..c946ad9f2f1d 100644
--- a/model_cards/allenai/wmt19-de-en-6-6-base/README.md
+++ b/model_cards/allenai/wmt19-de-en-6-6-base/README.md
@@ -61,7 +61,7 @@ Pretrained weights were left identical to the original model released by allenai
 Here are the BLEU scores:
 
 model | transformers
--------|---------|----------
+-------|---------
 wmt19-de-en-6-6-base | 38.37
 
 The score was calculated using this code:
diff --git a/model_cards/allenai/wmt19-de-en-6-6-big/README.md b/model_cards/allenai/wmt19-de-en-6-6-big/README.md
index 515e1d6744d8..f675f899a16d 100644
--- a/model_cards/allenai/wmt19-de-en-6-6-big/README.md
+++ b/model_cards/allenai/wmt19-de-en-6-6-big/README.md
@@ -61,7 +61,7 @@ Pretrained weights were left identical to the original model released by allenai
 Here are the BLEU scores:
 
 model | transformers
--------|---------|----------
+-------|---------
 wmt19-de-en-6-6-big | 39.9
 
 The score was calculated using this code:
diff --git a/scripts/fsmt/gen-card-allenai-wmt19.py b/scripts/fsmt/gen-card-allenai-wmt19.py
index b6bb97d6ac52..4df5ca0542e3 100755
--- a/scripts/fsmt/gen-card-allenai-wmt19.py
+++ b/scripts/fsmt/gen-card-allenai-wmt19.py
@@ -85,7 +85,7 @@ def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
 Here are the BLEU scores:
 
 model | transformers
--------|---------|----------
+-------|---------
 {model_name} | {scores[model_name][1]}
 
 The score was calculated using this code:
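
The change is the same in all three files: the delimiter row of the BLEU table had three cells while the header and data rows have only two, so GitHub-flavored markdown would not render it as a table. A minimal sketch of the corrected two-column table as it appears in the de-en-6-6-base card after this patch:

```markdown
model | transformers
-------|---------
wmt19-de-en-6-6-base | 38.37
```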