
Commit fa0650e

Merge pull request #6 from dantp-ai/refactoring/fix-orig-flake8-issues
Refactoring/fix orig flake8 issues
2 parents (38a3c24 + 279db0b), commit fa0650e

20 files changed: +221, -170 lines

.github/workflows/minitorch.yml  (+15, -2)

@@ -26,30 +26,43 @@ jobs:
         run: |
           # stop the build if there are Python syntax errors or undefined names
           flake8 --ignore "N801, E203, E266, E501, W503, F812, F401, F841, E741, N803, N802, N806" minitorch/ tests/ project/
-      - name: Test with pytest
+
+      - name: Test Module 0
         run: |
           echo "Module 0"
           pytest tests -x -m task0_1
           pytest tests -x -m task0_2
           pytest tests -x -m task0_3
           pytest tests -x -m task0_4
+
+      - name: Test Module 1
+        run: |
           echo "Module 1"
           pytest tests -x -m task1_1
           pytest tests -x -m task1_2
           pytest tests -x -m task1_3
           pytest tests -x -m task1_4
+
+      - name: Test Module 2
+        run: |
           echo "Module 2"
           pytest tests -x -m task2_1
           pytest tests -x -m task2_2
           pytest tests -x -m task2_3
           pytest tests -x -m task2_4
+
+      - name: Test Module 3
+        run: |
           echo "Module 3"
           pytest tests -x -m task3_1
           pytest tests -x -m task3_2
           pytest tests -x -m task3_3
           pytest tests -x -m task3_4
+
+      - name: Test Module 4
+        run: |
           echo "Module 4"
           pytest tests -x -m task4_1
           pytest tests -x -m task4_2
           pytest tests -x -m task4_3
-          pytest tests -x -m task4_4
+          pytest tests -x -m task4_4
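The workflow now runs one step per module, selecting tests with pytest's `-m` marker filter. As a minimal sketch (assuming the `task0_1` through `task4_4` markers are registered in the project's pytest configuration), a test opts into one of these steps like this:

```python
import pytest


@pytest.mark.task0_1
def test_identity() -> None:
    # Collected by `pytest tests -x -m task0_1` in the "Test Module 0" step;
    # the other steps skip it because their -m expressions don't match.
    assert 1 + 1 == 2
```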

environment.yml  (+19)

@@ -0,0 +1,19 @@
+name: minitorch-conda-env-py38
+
+channels:
+  - conda-forge
+
+dependencies:
+  - python=3.8
+  - chalk-diagram
+  - colorama==0.4.3
+  - hypothesis==6.54
+  - mypy==0.971
+  - numba==0.56
+  - numpy==1.22
+  - pre-commit==2.20.0
+  - pytest==7.1.2
+  - pytest-env
+  - pytest-runner==5.2
+  - typing_extensions
+  - colour==0.1.5
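The new environment file pins the toolchain that CI and the pre-commit hooks expect; it can be created with `conda env create -f environment.yml`. A small, hypothetical sanity check that the active interpreter matches the pins:

```python
# Hypothetical check, assuming the minitorch-conda-env-py38 environment is active.
import sys

import hypothesis
import numba
import numpy

assert sys.version_info[:2] == (3, 8)
assert numba.__version__.startswith("0.56")
assert numpy.__version__.startswith("1.22")
assert hypothesis.__version__.startswith("6.54")
print("pinned toolchain looks consistent")
```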

minitorch/autodiff.py  (+4, -4)

@@ -1,5 +1,5 @@
 from dataclasses import dataclass
-from typing import Any, Iterable, List, Tuple
+from typing import Any, Iterable, List, Tuple  # noqa: F401

 from typing_extensions import Protocol

@@ -23,7 +23,7 @@ def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6)
         An approximation of $f'_i(x_0, \ldots, x_{n-1})$
     """
     # TODO: Implement for Task 1.1.
-    raise NotImplementedError('Need to implement for Task 1.1')
+    raise NotImplementedError("Need to implement for Task 1.1")


 variable_count = 1
@@ -62,7 +62,7 @@ def topological_sort(variable: Variable) -> Iterable[Variable]:
         Non-constant Variables in topological order starting from the right.
     """
     # TODO: Implement for Task 1.4.
-    raise NotImplementedError('Need to implement for Task 1.4')
+    raise NotImplementedError("Need to implement for Task 1.4")


 def backpropagate(variable: Variable, deriv: Any) -> None:
@@ -77,7 +77,7 @@ def backpropagate(variable: Variable, deriv: Any) -> None:
         No return. Should write to its results to the derivative values of each leaf through `accumulate_derivative`.
     """
     # TODO: Implement for Task 1.4.
-    raise NotImplementedError('Need to implement for Task 1.4')
+    raise NotImplementedError("Need to implement for Task 1.4")


 @dataclass
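For orientation, the `central_difference` stub touched above is conventionally built on the symmetric quotient (f(..., x_i + eps, ...) - f(..., x_i - eps, ...)) / (2 * eps). A minimal sketch of that formula, not the repository's reference solution:

```python
from typing import Any


def central_difference_sketch(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) -> float:
    # Perturb only the arg-th input by +/- epsilon and take the symmetric quotient.
    up = [v + epsilon if i == arg else v for i, v in enumerate(vals)]
    down = [v - epsilon if i == arg else v for i, v in enumerate(vals)]
    return (f(*up) - f(*down)) / (2.0 * epsilon)
```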

minitorch/cuda_ops.py  (+30, -30)

@@ -150,11 +150,11 @@ def _map(
         in_strides: Strides,
     ) -> None:

-        out_index = cuda.local.array(MAX_DIMS, numba.int32)
-        in_index = cuda.local.array(MAX_DIMS, numba.int32)
-        i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
+        out_index = cuda.local.array(MAX_DIMS, numba.int32)  # noqa: F841
+        in_index = cuda.local.array(MAX_DIMS, numba.int32)  # noqa: F841
+        i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x  # noqa: F841
         # TODO: Implement for Task 3.3.
-        raise NotImplementedError('Need to implement for Task 3.3')
+        raise NotImplementedError("Need to implement for Task 3.3")

     return cuda.jit()(_map)  # type: ignore

@@ -190,13 +190,13 @@ def _zip(
         b_strides: Strides,
     ) -> None:

-        out_index = cuda.local.array(MAX_DIMS, numba.int32)
-        a_index = cuda.local.array(MAX_DIMS, numba.int32)
-        b_index = cuda.local.array(MAX_DIMS, numba.int32)
-        i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
+        out_index = cuda.local.array(MAX_DIMS, numba.int32)  # noqa: F841
+        a_index = cuda.local.array(MAX_DIMS, numba.int32)  # noqa: F841
+        b_index = cuda.local.array(MAX_DIMS, numba.int32)  # noqa: F841
+        i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x  # noqa: F841

         # TODO: Implement for Task 3.3.
-        raise NotImplementedError('Need to implement for Task 3.3')
+        raise NotImplementedError("Need to implement for Task 3.3")

     return cuda.jit()(_zip)  # type: ignore

@@ -224,12 +224,12 @@ def _sum_practice(out: Storage, a: Storage, size: int) -> None:
     """
     BLOCK_DIM = 32

-    cache = cuda.shared.array(BLOCK_DIM, numba.float64)
-    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
-    pos = cuda.threadIdx.x
+    cache = cuda.shared.array(BLOCK_DIM, numba.float64)  # noqa: F841
+    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x  # noqa: F841
+    pos = cuda.threadIdx.x  # noqa: F841

     # TODO: Implement for Task 3.3.
-    raise NotImplementedError('Need to implement for Task 3.3')
+    raise NotImplementedError("Need to implement for Task 3.3")


 jit_sum_practice = cuda.jit()(_sum_practice)
@@ -273,13 +273,13 @@ def _reduce(
         reduce_value: float,
     ) -> None:
         BLOCK_DIM = 1024
-        cache = cuda.shared.array(BLOCK_DIM, numba.float64)
-        out_index = cuda.local.array(MAX_DIMS, numba.int32)
-        out_pos = cuda.blockIdx.x
-        pos = cuda.threadIdx.x
+        cache = cuda.shared.array(BLOCK_DIM, numba.float64)  # noqa: F841
+        out_index = cuda.local.array(MAX_DIMS, numba.int32)  # noqa: F841
+        out_pos = cuda.blockIdx.x  # noqa: F841
+        pos = cuda.threadIdx.x  # noqa: F841

         # TODO: Implement for Task 3.3.
-        raise NotImplementedError('Need to implement for Task 3.3')
+        raise NotImplementedError("Need to implement for Task 3.3")

     return cuda.jit()(_reduce)  # type: ignore

@@ -314,9 +314,9 @@ def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) -> None:
         b (Storage): storage for `b` tensor.
         size (int): size of the square
     """
-    BLOCK_DIM = 32
+    BLOCK_DIM = 32  # noqa: F841
     # TODO: Implement for Task 3.3.
-    raise NotImplementedError('Need to implement for Task 3.3')
+    raise NotImplementedError("Need to implement for Task 3.3")


 jit_mm_practice = cuda.jit()(_mm_practice)
@@ -363,30 +363,30 @@ def _tensor_matrix_multiply(
     Returns:
         None : Fills in `out`
     """
-    a_batch_stride = a_strides[0] if a_shape[0] > 1 else 0
-    b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0
+    a_batch_stride = a_strides[0] if a_shape[0] > 1 else 0  # noqa: F841
+    b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0  # noqa: F841
     # Batch dimension - fixed
-    batch = cuda.blockIdx.z
+    batch = cuda.blockIdx.z  # noqa: F841

     BLOCK_DIM = 32
-    a_shared = cuda.shared.array((BLOCK_DIM, BLOCK_DIM), numba.float64)
-    b_shared = cuda.shared.array((BLOCK_DIM, BLOCK_DIM), numba.float64)
+    a_shared = cuda.shared.array((BLOCK_DIM, BLOCK_DIM), numba.float64)  # noqa: F841
+    b_shared = cuda.shared.array((BLOCK_DIM, BLOCK_DIM), numba.float64)  # noqa: F841

     # The final position c[i, j]
-    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
-    j = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
+    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x  # noqa: F841
+    j = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y  # noqa: F841

     # The local position in the block.
-    pi = cuda.threadIdx.x
-    pj = cuda.threadIdx.y
+    pi = cuda.threadIdx.x  # noqa: F841
+    pj = cuda.threadIdx.y  # noqa: F841

     # Code Plan:
     # 1) Move across shared dimension by block dim.
     #    a) Copy into shared memory for a matrix.
     #    b) Copy into shared memory for b matrix
     #    c) Compute the dot produce for position c[i, j]
     # TODO: Implement for Task 3.4.
-    raise NotImplementedError('Need to implement for Task 3.4')
+    raise NotImplementedError("Need to implement for Task 3.4")


 tensor_matrix_multiply = cuda.jit(_tensor_matrix_multiply)
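The `_sum_practice` scaffolding above (a shared `cache`, a global index `i`, and a block-local `pos`) is the usual shape of a block-level reduction. A hypothetical numba.cuda sketch of that pattern, independent of minitorch's tensor machinery and requiring a CUDA-capable GPU:

```python
from numba import cuda, float64

BLOCK_DIM = 32  # compile-time constant so it can size the shared array


@cuda.jit
def block_sum(out, a, size):
    cache = cuda.shared.array(BLOCK_DIM, float64)
    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    pos = cuda.threadIdx.x

    # Each thread stages one element (or 0.0 past the end) into shared memory.
    cache[pos] = a[i] if i < size else 0.0
    cuda.syncthreads()

    # Pairwise tree reduction inside the block.
    stride = 1
    while stride < BLOCK_DIM:
        if pos % (2 * stride) == 0:
            cache[pos] += cache[pos + stride]
        stride *= 2
        cuda.syncthreads()

    # Thread 0 writes this block's partial sum.
    if pos == 0:
        out[cuda.blockIdx.x] = cache[0]
```

Launched as `block_sum[nblocks, BLOCK_DIM](out, a, size)`, each block emits one partial sum that the host (or a second kernel) then combines.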

minitorch/fast_conv.py  (+9, -10)

@@ -1,13 +1,12 @@
 from typing import Tuple

-import numpy as np
-from numba import njit, prange
+import numpy as np  # noqa: F401
+from numba import njit, prange  # noqa: F401

 from .autodiff import Context
 from .tensor import Tensor
+from .tensor_data import MAX_DIMS, Index  # noqa: F401
 from .tensor_data import (
-    MAX_DIMS,
-    Index,
     Shape,
     Strides,
     broadcast_index,
@@ -77,11 +76,11 @@ def _tensor_conv1d(
         and in_channels == in_channels_
         and out_channels == out_channels_
     )
-    s1 = input_strides
-    s2 = weight_strides
+    s1 = input_strides  # noqa: F841
+    s2 = weight_strides  # noqa: F841

     # TODO: Implement for Task 4.1.
-    raise NotImplementedError('Need to implement for Task 4.1')
+    raise NotImplementedError("Need to implement for Task 4.1")


 tensor_conv1d = njit(parallel=True)(_tensor_conv1d)
@@ -203,11 +202,11 @@ def _tensor_conv2d(
     s1 = input_strides
     s2 = weight_strides
     # inners
-    s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3]
-    s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]
+    s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3]  # noqa: F841
+    s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]  # noqa: F841

     # TODO: Implement for Task 4.2.
-    raise NotImplementedError('Need to implement for Task 4.2')
+    raise NotImplementedError("Need to implement for Task 4.2")


 tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d)
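As a rough guide to what the stubbed 1D kernel computes, here is a plain numpy sketch of a cross-correlation with zero padding past the edge; the anchoring and reversal conventions are assumptions here, and the authoritative contract is the docstring in `fast_conv.py`:

```python
import numpy as np


def naive_conv1d(inp: np.ndarray, weight: np.ndarray) -> np.ndarray:
    # inp: (batch, in_channels, width); weight: (out_channels, in_channels, k_width)
    batch, in_channels, width = inp.shape
    out_channels, _, k_width = weight.shape
    out = np.zeros((batch, out_channels, width))
    for b in range(batch):
        for oc in range(out_channels):
            for w in range(width):
                for ic in range(in_channels):
                    for k in range(k_width):
                        # Window positions past the right edge act as zeros.
                        if w + k < width:
                            out[b, oc, w] += inp[b, ic, w + k] * weight[oc, ic, k]
    return out
```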

minitorch/fast_ops.py  (+12, -10)

@@ -2,11 +2,12 @@

 from typing import TYPE_CHECKING

-import numpy as np
-from numba import njit, prange
+import numpy as np  # noqa: F401
+from numba import njit
+from numba import prange  # noqa: F401

+from .tensor_data import MAX_DIMS  # noqa: F401
 from .tensor_data import (
-    MAX_DIMS,
     broadcast_index,
     index_to_position,
     shape_broadcast,
@@ -18,7 +19,8 @@
     from typing import Callable, Optional

     from .tensor import Tensor
-    from .tensor_data import Index, Shape, Storage, Strides
+    from .tensor_data import Index  # noqa: F401
+    from .tensor_data import Shape, Storage, Strides

 # TIP: Use `NUMBA_DISABLE_JIT=1 pytest tests/ -m task3_1` to run these tests without JIT.

@@ -160,7 +162,7 @@ def _map(
         in_strides: Strides,
     ) -> None:
         # TODO: Implement for Task 3.1.
-        raise NotImplementedError('Need to implement for Task 3.1')
+        raise NotImplementedError("Need to implement for Task 3.1")

     return njit(parallel=True)(_map)  # type: ignore

@@ -199,7 +201,7 @@ def _zip(
         b_strides: Strides,
     ) -> None:
         # TODO: Implement for Task 3.1.
-        raise NotImplementedError('Need to implement for Task 3.1')
+        raise NotImplementedError("Need to implement for Task 3.1")

     return njit(parallel=True)(_zip)  # type: ignore

@@ -233,7 +235,7 @@ def _reduce(
         reduce_dim: int,
     ) -> None:
         # TODO: Implement for Task 3.1.
-        raise NotImplementedError('Need to implement for Task 3.1')
+        raise NotImplementedError("Need to implement for Task 3.1")

     return njit(parallel=True)(_reduce)  # type: ignore

@@ -279,11 +281,11 @@ def _tensor_matrix_multiply(
     Returns:
         None : Fills in `out`
     """
-    a_batch_stride = a_strides[0] if a_shape[0] > 1 else 0
-    b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0
+    a_batch_stride = a_strides[0] if a_shape[0] > 1 else 0  # noqa: F841
+    b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0  # noqa: F841

     # TODO: Implement for Task 3.2.
-    raise NotImplementedError('Need to implement for Task 3.2')
+    raise NotImplementedError("Need to implement for Task 3.2")


 tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply)
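The `_map`, `_zip`, and `_reduce` stubs above are where the parallel loops go. A self-contained toy showing the `njit(parallel=True)` plus `prange` pattern they build on (contiguous 1-D storage only, unlike minitorch's stride-aware versions):

```python
import numpy as np
from numba import njit, prange


@njit(parallel=True)
def square_map(out, a):
    # Every iteration is independent, so numba may split the loop across threads.
    for i in prange(out.size):
        out[i] = a[i] * a[i]


a = np.arange(8, dtype=np.float64)
out = np.empty_like(a)
square_map(out, a)  # out now holds a ** 2
```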

minitorch/module.py  (+4, -4)

@@ -32,12 +32,12 @@ def modules(self) -> Sequence[Module]:
     def train(self) -> None:
         "Set the mode of this module and all descendent modules to `train`."
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError('Need to implement for Task 0.4')
+        raise NotImplementedError("Need to implement for Task 0.4")

     def eval(self) -> None:
         "Set the mode of this module and all descendent modules to `eval`."
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError('Need to implement for Task 0.4')
+        raise NotImplementedError("Need to implement for Task 0.4")

     def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
         """
@@ -48,12 +48,12 @@ def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
             The name and `Parameter` of each ancestor parameter.
         """
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError('Need to implement for Task 0.4')
+        raise NotImplementedError("Need to implement for Task 0.4")

     def parameters(self) -> Sequence[Parameter]:
         "Enumerate over all the parameters of this module and its descendents."
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError('Need to implement for Task 0.4')
+        raise NotImplementedError("Need to implement for Task 0.4")

     def add_parameter(self, k: str, v: Any) -> Parameter:
         """
