
Commit dd646c1
Merge branch 'main' into torch-sdpa-preliminary-support
2 parents: 3b47502 + a8e74eb

974 files changed (+59,929 / -9,141 lines)

.circleci/config.yml

Lines changed: 1 addition & 0 deletions
@@ -209,6 +209,7 @@ jobs:
         - run: make deps_table_check_updated
         - run: python utils/update_metadata.py --check-only
         - run: python utils/check_task_guides.py
+        - run: python utils/check_docstrings.py

 workflows:
     version: 2

.circleci/create_circleci_config.py

Lines changed: 4 additions & 1 deletion
@@ -127,6 +127,8 @@ def to_dict(self):
             },
         ]
         steps.extend([{"run": l} for l in self.install_steps])
+        steps.extend([{"run": 'pip install "fsspec>=2023.5.0,<2023.10.0"'}])
+        steps.extend([{"run": "pip install pytest-subtests"}])
         steps.append(
             {
                 "save_cache": {

@@ -311,7 +313,7 @@ def job_name(self):
         "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
     ],
     parallelism=1,
-    pytest_num_workers=8,
+    pytest_num_workers=6,
 )

@@ -347,6 +349,7 @@ def job_name(self):
         "pip install -U --upgrade-strategy eager .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm,video]",
     ],
     marker="is_pipeline_test",
+    pytest_num_workers=6,
 )
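For context on the first hunk: each {"run": ...} dict appended in to_dict() maps to one run step in the generated CircleCI job. Assuming the steps list is serialized as-is (an inference from this diff, not something shown in the commit), every generated job would pick up the two new install steps, roughly as in this sketch; the first and last entries are illustrative placeholders:

    steps:
      - run: pip install --upgrade pip                    # stand-in for the job's existing install_steps
      - run: pip install "fsspec>=2023.5.0,<2023.10.0"    # new: pin fsspec below 2023.10.0
      - run: pip install pytest-subtests                  # new: install the pytest-subtests plugin
      - save_cache:                                       # cache step appended next, as in the diff
          ...

Since both extend() calls live in to_dict() itself, the fsspec pin and the pytest-subtests install apply to every job the script generates, not to a single test suite.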

.github/workflows/build_documentation.yml

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ jobs:
       commit_sha: ${{ github.sha }}
       package: transformers
       notebook_folder: transformers_doc
-      languages: de en es fr it ko pt zh
+      languages: de en es fr hi it ko pt zh ja te
     secrets:
       token: ${{ secrets.HUGGINGFACE_PUSH }}
       hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}

.github/workflows/build_pr_documentation.yml

Lines changed: 1 addition & 1 deletion
@@ -14,4 +14,4 @@ jobs:
       commit_sha: ${{ github.event.pull_request.head.sha }}
       pr_number: ${{ github.event.number }}
       package: transformers
-      languages: de en es fr it ko pt zh
+      languages: de en es fr hi it ko pt zh ja te

.github/workflows/self-nightly-scheduled.yml

Lines changed: 0 additions & 34 deletions
@@ -21,36 +21,8 @@ env:
   RUN_PT_TF_CROSS_TESTS: 1

 jobs:
-  check_runner_status:
-    name: Check Runner Status
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout transformers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: Check Runner Status
-        run: python utils/check_self_hosted_runner.py --target_runners single-gpu-past-ci-runner-docker,multi-gpu-past-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
-
-  check_runners:
-    name: Check Runners
-    needs: check_runner_status
-    strategy:
-      matrix:
-        machine_type: [single-gpu, multi-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
-    container:
-      image: huggingface/transformers-all-latest-torch-nightly-gpu
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    steps:
-      - name: NVIDIA-SMI
-        run: |
-          nvidia-smi
-
   setup:
     name: Setup
-    needs: check_runners
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]

@@ -276,8 +248,6 @@ jobs:
     runs-on: ubuntu-latest
     if: always()
     needs: [
-      check_runner_status,
-      check_runners,
       setup,
       run_tests_single_gpu,
       run_tests_multi_gpu,

@@ -288,8 +258,6 @@ jobs:
         shell: bash
         # For the meaning of these environment variables, see the job `Setup`
         run: |
-          echo "Runner availability: ${{ needs.check_runner_status.result }}"
-          echo "Runner status: ${{ needs.check_runners.result }}"
           echo "Setup status: ${{ needs.setup.result }}"

       - uses: actions/checkout@v3

@@ -303,8 +271,6 @@ jobs:
           CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }}
           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
           CI_EVENT: Nightly CI
-          RUNNER_STATUS: ${{ needs.check_runner_status.result }}
-          RUNNER_ENV_STATUS: ${{ needs.check_runners.result }}
           SETUP_STATUS: ${{ needs.setup.result }}
           # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
           # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.

.github/workflows/self-past.yml

Lines changed: 0 additions & 34 deletions
@@ -32,36 +32,8 @@ env:
   RUN_PT_TF_CROSS_TESTS: 1

 jobs:
-  check_runner_status:
-    name: Check Runner Status
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout transformers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: Check Runner Status
-        run: python utils/check_self_hosted_runner.py --target_runners single-gpu-past-ci-runner-docker,multi-gpu-past-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
-
-  check_runners:
-    name: Check Runners
-    needs: check_runner_status
-    strategy:
-      matrix:
-        machine_type: [single-gpu, multi-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
-    container:
-      image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    steps:
-      - name: NVIDIA-SMI
-        run: |
-          nvidia-smi
-
   setup:
     name: Setup
-    needs: check_runners
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]

@@ -319,8 +291,6 @@ jobs:
     runs-on: ubuntu-latest
     if: always()
     needs: [
-      check_runner_status,
-      check_runners,
       setup,
       run_tests_single_gpu,
       run_tests_multi_gpu,

@@ -331,8 +301,6 @@ jobs:
         shell: bash
         # For the meaning of these environment variables, see the job `Setup`
         run: |
-          echo "Runner availability: ${{ needs.check_runner_status.result }}"
-          echo "Runner status: ${{ needs.check_runners.result }}"
           echo "Setup status: ${{ needs.setup.result }}"

       - uses: actions/checkout@v3

@@ -351,8 +319,6 @@ jobs:
           CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }}
           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
           CI_EVENT: Past CI - ${{ inputs.framework }}-${{ inputs.version }}
-          RUNNER_STATUS: ${{ needs.check_runner_status.result }}
-          RUNNER_ENV_STATUS: ${{ needs.check_runners.result }}
           SETUP_STATUS: ${{ needs.setup.result }}
           # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
           # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.

New workflow file: Self-hosted runner (AMD mi210 CI caller)

Lines changed: 25 additions & 0 deletions

@@ -0,0 +1,25 @@
+name: Self-hosted runner (AMD mi210 CI caller)
+
+on:
+  workflow_run:
+    workflows: ["Self-hosted runner (push-caller)"]
+    branches: ["main"]
+    types: [completed]
+  push:
+    branches:
+      - run_amd_push_ci_caller*
+    paths:
+      - "src/**"
+      - "tests/**"
+      - ".github/**"
+      - "templates/**"
+      - "utils/**"
+
+jobs:
+  run_amd_ci:
+    name: AMD mi210
+    if: (cancelled() != true) && ((github.event_name != 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller')))
+    uses: ./.github/workflows/self-push-amd.yml
+    with:
+      gpu_flavor: mi210
+    secrets: inherit

New workflow file: Self-hosted runner (AMD mi250 CI caller)

Lines changed: 25 additions & 0 deletions

@@ -0,0 +1,25 @@
+name: Self-hosted runner (AMD mi250 CI caller)
+
+on:
+  workflow_run:
+    workflows: ["Self-hosted runner (push-caller)"]
+    branches: ["main"]
+    types: [completed]
+  push:
+    branches:
+      - run_amd_push_ci_caller*
+    paths:
+      - "src/**"
+      - "tests/**"
+      - ".github/**"
+      - "templates/**"
+      - "utils/**"
+
+jobs:
+  run_amd_ci:
+    name: AMD mi250
+    if: (cancelled() != true) && ((github.event_name != 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller')))
+    uses: ./.github/workflows/self-push-amd.yml
+    with:
+      gpu_flavor: mi250
+    secrets: inherit

.github/workflows/self-push-amd.yml

Lines changed: 9 additions & 22 deletions
@@ -1,21 +1,11 @@
 name: Self-hosted runner AMD GPU (push)

 on:
-  workflow_run:
-    workflows: ["Self-hosted runner (push-caller)"]
-    branches: ["main"]
-    types: [completed]
-  push:
-    branches:
-      - ci_*
-      - ci-*
-    paths:
-      - "src/**"
-      - "tests/**"
-      - ".github/**"
-      - "templates/**"
-      - "utils/**"
-  repository_dispatch:
+  workflow_call:
+    inputs:
+      gpu_flavor:
+        required: true
+        type: string

 env:
   HF_HOME: /mnt/cache

@@ -45,8 +35,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-        gpu_flavor: [mi210]
-    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ matrix.gpu_flavor }}']
+    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
     container:
       image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now
       options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

@@ -65,8 +54,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-        gpu_flavor: [mi210]
-    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ matrix.gpu_flavor }}']
+    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
     container:
       image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now
       options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

@@ -164,8 +152,7 @@ jobs:
       matrix:
         folders: ${{ fromJson(needs.setup_gpu.outputs.matrix) }}
         machine_type: [single-gpu, multi-gpu]
-        gpu_flavor: [mi210]
-    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ matrix.gpu_flavor }}']
+    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
     container:
       image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now
       options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

@@ -321,7 +308,7 @@ jobs:
           CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
           CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_AMD }}
           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
-          CI_EVENT: push
+          CI_EVENT: Push CI (AMD) - ${{ inputs.gpu_flavor }}
           CI_TITLE_PUSH: ${{ github.event.head_commit.message }}
           CI_TITLE_WORKFLOW_RUN: ${{ github.event.workflow_run.head_commit.message }}
           CI_SHA: ${{ env.CI_SHA }}
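Taken together with the two new caller workflows above, this change turns self-push-amd.yml into a reusable workflow: the push/workflow_run triggers move out into thin per-flavor callers, and the GPU flavor arrives as a workflow_call input instead of a matrix dimension. A minimal sketch of the pattern, using illustrative file and job names rather than the exact ones from this commit:

    # reusable workflow, e.g. .github/workflows/amd-ci.yml
    name: Reusable AMD CI
    on:
      workflow_call:
        inputs:
          gpu_flavor:
            required: true
            type: string
    jobs:
      tests:
        runs-on: [self-hosted, amd-gpu, '${{ inputs.gpu_flavor }}']
        steps:
          - run: echo "running on ${{ inputs.gpu_flavor }}"

    # caller workflow, e.g. .github/workflows/amd-ci-mi210.yml
    name: AMD CI (mi210 caller)
    on:
      push:
        branches: [main]
    jobs:
      call:
        uses: ./.github/workflows/amd-ci.yml
        with:
          gpu_flavor: mi210
        secrets: inherit

One practical effect, visible in this commit, is that adding another flavor (here mi250) only requires a new caller file; the shared workflow and its test matrix stay unchanged.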

.github/workflows/self-push.yml

Lines changed: 0 additions & 34 deletions
@@ -27,36 +27,8 @@ env:
   RUN_PT_TF_CROSS_TESTS: 1

 jobs:
-  check_runner_status:
-    name: Check Runner Status
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout transformers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: Check Runner Status
-        run: python utils/check_self_hosted_runner.py --target_runners single-gpu-ci-runner-docker,multi-gpu-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
-
-  check_runners:
-    name: Check Runners
-    needs: check_runner_status
-    strategy:
-      matrix:
-        machine_type: [single-gpu, multi-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, push-ci]
-    container:
-      image: huggingface/transformers-all-latest-gpu-push-ci
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    steps:
-      - name: NVIDIA-SMI
-        run: |
-          nvidia-smi
-
   setup:
     name: Setup
-    needs: check_runners
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]

@@ -521,8 +493,6 @@ jobs:
     runs-on: ubuntu-latest
     if: always()
     needs: [
-      check_runner_status,
-      check_runners,
       setup,
       run_tests_single_gpu,
       run_tests_multi_gpu,

@@ -534,9 +504,7 @@ jobs:
         shell: bash
         # For the meaning of these environment variables, see the job `Setup`
         run: |
-          echo "Runner availability: ${{ needs.check_runner_status.result }}"
           echo "Setup status: ${{ needs.setup.result }}"
-          echo "Runner status: ${{ needs.check_runners.result }}"

       # Necessary to get the correct branch name and commit SHA for `workflow_run` event
       # We also take into account the `push` event (we might want to test some changes in a branch)

@@ -589,8 +557,6 @@ jobs:
           CI_TITLE_PUSH: ${{ github.event.head_commit.message }}
           CI_TITLE_WORKFLOW_RUN: ${{ github.event.workflow_run.head_commit.message }}
           CI_SHA: ${{ env.CI_SHA }}
-          RUNNER_STATUS: ${{ needs.check_runner_status.result }}
-          RUNNER_ENV_STATUS: ${{ needs.check_runners.result }}
           SETUP_STATUS: ${{ needs.setup.result }}

           # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
