
Commit 246f231

Format with black and ignore F841 for now
1 parent 330bd63 commit 246f231

14 files changed, +92 −93 lines
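The changes are mechanical: black normalizes single-quoted string literals to double quotes, and inline `# noqa: F841` comments silence flake8's "local variable is assigned to but never used" warning on stride aliases that the still-unimplemented kernels never read. A minimal before/after sketch (illustrative only; `_kernel` and its argument are made up, not taken from this diff):

# Before: flake8 reports F841 because s1 is assigned but never used,
# and black rewrites the single-quoted string.
def _kernel(input_strides):
    s1 = input_strides
    raise NotImplementedError('Need to implement for Task 4.1')

# After: quotes are normalized, and the inline noqa keeps the alias
# in place for the upcoming implementation without failing lint.
def _kernel(input_strides):
    s1 = input_strides  # noqa: F841
    raise NotImplementedError("Need to implement for Task 4.1")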

minitorch/autodiff.py

+3 −3

@@ -23,7 +23,7 @@ def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6)
         An approximation of $f'_i(x_0, \ldots, x_{n-1})$
     """
     # TODO: Implement for Task 1.1.
-    raise NotImplementedError('Need to implement for Task 1.1')
+    raise NotImplementedError("Need to implement for Task 1.1")


 variable_count = 1
@@ -62,7 +62,7 @@ def topological_sort(variable: Variable) -> Iterable[Variable]:
         Non-constant Variables in topological order starting from the right.
     """
     # TODO: Implement for Task 1.4.
-    raise NotImplementedError('Need to implement for Task 1.4')
+    raise NotImplementedError("Need to implement for Task 1.4")


 def backpropagate(variable: Variable, deriv: Any) -> None:
@@ -77,7 +77,7 @@ def backpropagate(variable: Variable, deriv: Any) -> None:
     No return. Should write to its results to the derivative values of each leaf through `accumulate_derivative`.
     """
     # TODO: Implement for Task 1.4.
-    raise NotImplementedError('Need to implement for Task 1.4')
+    raise NotImplementedError("Need to implement for Task 1.4")


 @dataclass

minitorch/cuda_ops.py

+6 −6

@@ -154,7 +154,7 @@ def _map(
         in_index = cuda.local.array(MAX_DIMS, numba.int32)
         i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
         # TODO: Implement for Task 3.3.
-        raise NotImplementedError('Need to implement for Task 3.3')
+        raise NotImplementedError("Need to implement for Task 3.3")

     return cuda.jit()(_map)  # type: ignore

@@ -196,7 +196,7 @@ def _zip(
         i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x

         # TODO: Implement for Task 3.3.
-        raise NotImplementedError('Need to implement for Task 3.3')
+        raise NotImplementedError("Need to implement for Task 3.3")

     return cuda.jit()(_zip)  # type: ignore

@@ -229,7 +229,7 @@ def _sum_practice(out: Storage, a: Storage, size: int) -> None:
     pos = cuda.threadIdx.x

     # TODO: Implement for Task 3.3.
-    raise NotImplementedError('Need to implement for Task 3.3')
+    raise NotImplementedError("Need to implement for Task 3.3")


 jit_sum_practice = cuda.jit()(_sum_practice)
@@ -279,7 +279,7 @@ def _reduce(
         pos = cuda.threadIdx.x

         # TODO: Implement for Task 3.3.
-        raise NotImplementedError('Need to implement for Task 3.3')
+        raise NotImplementedError("Need to implement for Task 3.3")

     return cuda.jit()(_reduce)  # type: ignore

@@ -316,7 +316,7 @@ def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) -> None:
     """
     BLOCK_DIM = 32
     # TODO: Implement for Task 3.3.
-    raise NotImplementedError('Need to implement for Task 3.3')
+    raise NotImplementedError("Need to implement for Task 3.3")


 jit_mm_practice = cuda.jit()(_mm_practice)
@@ -386,7 +386,7 @@ def _tensor_matrix_multiply(
     # b) Copy into shared memory for b matrix
     # c) Compute the dot produce for position c[i, j]
     # TODO: Implement for Task 3.4.
-    raise NotImplementedError('Need to implement for Task 3.4')
+    raise NotImplementedError("Need to implement for Task 3.4")


 tensor_matrix_multiply = cuda.jit(_tensor_matrix_multiply)

minitorch/fast_conv.py

+6 −6

@@ -76,11 +76,11 @@ def _tensor_conv1d(
         and in_channels == in_channels_
         and out_channels == out_channels_
     )
-    s1 = input_strides
-    s2 = weight_strides
+    s1 = input_strides  # noqa: F841
+    s2 = weight_strides  # noqa: F841

     # TODO: Implement for Task 4.1.
-    raise NotImplementedError('Need to implement for Task 4.1')
+    raise NotImplementedError("Need to implement for Task 4.1")


 tensor_conv1d = njit(parallel=True)(_tensor_conv1d)
@@ -202,11 +202,11 @@ def _tensor_conv2d(
     s1 = input_strides
     s2 = weight_strides
     # inners
-    s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3]
-    s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]
+    s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3]  # noqa: F841
+    s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]  # noqa: F841

     # TODO: Implement for Task 4.2.
-    raise NotImplementedError('Need to implement for Task 4.2')
+    raise NotImplementedError("Need to implement for Task 4.2")


 tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d)

minitorch/fast_ops.py

+4 −4

@@ -162,7 +162,7 @@ def _map(
         in_strides: Strides,
     ) -> None:
         # TODO: Implement for Task 3.1.
-        raise NotImplementedError('Need to implement for Task 3.1')
+        raise NotImplementedError("Need to implement for Task 3.1")

     return njit(parallel=True)(_map)  # type: ignore

@@ -201,7 +201,7 @@ def _zip(
         b_strides: Strides,
     ) -> None:
         # TODO: Implement for Task 3.1.
-        raise NotImplementedError('Need to implement for Task 3.1')
+        raise NotImplementedError("Need to implement for Task 3.1")

     return njit(parallel=True)(_zip)  # type: ignore

@@ -235,7 +235,7 @@ def _reduce(
         reduce_dim: int,
     ) -> None:
         # TODO: Implement for Task 3.1.
-        raise NotImplementedError('Need to implement for Task 3.1')
+        raise NotImplementedError("Need to implement for Task 3.1")

     return njit(parallel=True)(_reduce)  # type: ignore

@@ -285,7 +285,7 @@ def _tensor_matrix_multiply(
     b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0

     # TODO: Implement for Task 3.2.
-    raise NotImplementedError('Need to implement for Task 3.2')
+    raise NotImplementedError("Need to implement for Task 3.2")


 tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply)

minitorch/module.py

+4 −4

@@ -32,12 +32,12 @@ def modules(self) -> Sequence[Module]:
     def train(self) -> None:
         "Set the mode of this module and all descendent modules to `train`."
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError('Need to implement for Task 0.4')
+        raise NotImplementedError("Need to implement for Task 0.4")

     def eval(self) -> None:
         "Set the mode of this module and all descendent modules to `eval`."
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError('Need to implement for Task 0.4')
+        raise NotImplementedError("Need to implement for Task 0.4")

     def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
         """
@@ -48,12 +48,12 @@ def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
            The name and `Parameter` of each ancestor parameter.
         """
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError('Need to implement for Task 0.4')
+        raise NotImplementedError("Need to implement for Task 0.4")

     def parameters(self) -> Sequence[Parameter]:
         "Enumerate over all the parameters of this module and its descendents."
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError('Need to implement for Task 0.4')
+        raise NotImplementedError("Need to implement for Task 0.4")

     def add_parameter(self, k: str, v: Any) -> Parameter:
         """

minitorch/nn.py

+8 −8

@@ -24,7 +24,7 @@ def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]:
     assert height % kh == 0
     assert width % kw == 0
     # TODO: Implement for Task 4.3.
-    raise NotImplementedError('Need to implement for Task 4.3')
+    raise NotImplementedError("Need to implement for Task 4.3")


 def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
@@ -40,7 +40,7 @@ def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
     """
     batch, channel, height, width = input.shape
     # TODO: Implement for Task 4.3.
-    raise NotImplementedError('Need to implement for Task 4.3')
+    raise NotImplementedError("Need to implement for Task 4.3")


 max_reduce = FastOps.reduce(operators.max, -1e9)
@@ -68,13 +68,13 @@ class Max(Function):
     def forward(ctx: Context, input: Tensor, dim: Tensor) -> Tensor:
         "Forward of max should be max reduction"
         # TODO: Implement for Task 4.4.
-        raise NotImplementedError('Need to implement for Task 4.4')
+        raise NotImplementedError("Need to implement for Task 4.4")

     @staticmethod
     def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
         "Backward of max should be argmax (see above)"
         # TODO: Implement for Task 4.4.
-        raise NotImplementedError('Need to implement for Task 4.4')
+        raise NotImplementedError("Need to implement for Task 4.4")


 def max(input: Tensor, dim: int) -> Tensor:
@@ -97,7 +97,7 @@ def softmax(input: Tensor, dim: int) -> Tensor:
        softmax tensor
     """
     # TODO: Implement for Task 4.4.
-    raise NotImplementedError('Need to implement for Task 4.4')
+    raise NotImplementedError("Need to implement for Task 4.4")


 def logsoftmax(input: Tensor, dim: int) -> Tensor:
@@ -116,7 +116,7 @@ def logsoftmax(input: Tensor, dim: int) -> Tensor:
        log of softmax tensor
     """
     # TODO: Implement for Task 4.4.
-    raise NotImplementedError('Need to implement for Task 4.4')
+    raise NotImplementedError("Need to implement for Task 4.4")


 def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
@@ -132,7 +132,7 @@ def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
     """
     batch, channel, height, width = input.shape
     # TODO: Implement for Task 4.4.
-    raise NotImplementedError('Need to implement for Task 4.4')
+    raise NotImplementedError("Need to implement for Task 4.4")


 def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
@@ -148,4 +148,4 @@ def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
        tensor with random positions dropped out
     """
     # TODO: Implement for Task 4.4.
-    raise NotImplementedError('Need to implement for Task 4.4')
+    raise NotImplementedError("Need to implement for Task 4.4")

minitorch/scalar.py

+11 −11

@@ -93,30 +93,30 @@ def __rtruediv__(self, b: ScalarLike) -> Scalar:

     def __add__(self, b: ScalarLike) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def __bool__(self) -> bool:
         return bool(self.data)

     def __lt__(self, b: ScalarLike) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def __gt__(self, b: ScalarLike) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def __eq__(self, b: ScalarLike) -> Scalar:  # type: ignore[override]
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def __sub__(self, b: ScalarLike) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def __neg__(self) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def __radd__(self, b: ScalarLike) -> Scalar:
         return self + b
@@ -126,19 +126,19 @@ def __rmul__(self, b: ScalarLike) -> Scalar:

     def log(self) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def exp(self) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def sigmoid(self) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     def relu(self) -> Scalar:
         # TODO: Implement for Task 1.2.
-        raise NotImplementedError('Need to implement for Task 1.2')
+        raise NotImplementedError("Need to implement for Task 1.2")

     # Variable elements for backprop

@@ -174,7 +174,7 @@ def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
         assert h.ctx is not None

         # TODO: Implement for Task 1.3.
-        raise NotImplementedError('Need to implement for Task 1.3')
+        raise NotImplementedError("Need to implement for Task 1.3")

     def backward(self, d_output: Optional[float] = None) -> None:
         """
