diff --git a/Chapter15 - Intro to Federated Learning - Deep Learning on Unseen Data.ipynb b/Chapter15 - Intro to Federated Learning - Deep Learning on Unseen Data.ipynb
index 14bbbef..4cbf72d 100644
--- a/Chapter15 - Intro to Federated Learning - Deep Learning on Unseen Data.ipynb
+++ b/Chapter15 - Intro to Federated Learning - Deep Learning on Unseen Data.ipynb
@@ -591,15 +591,15 @@
     "\n",
     "            # padding token should stay at 0\n",
     "            model.weight.data[w2i['<unk>']] *= 0 \n",
-    "            input = Tensor(input_data[b_i*bs:(b_i+1)*bs], autograd=True)\n",
-    "            target = Tensor(target_data[b_i*bs:(b_i+1)*bs], autograd=True)\n",
+    "            input = Tensor(input_data[b_i*batch_size:(b_i+1)*batch_size], autograd=True)\n",
+    "            target = Tensor(target_data[b_i*batch_size:(b_i+1)*batch_size], autograd=True)\n",
     "\n",
     "            pred = model.forward(input).sum(1).sigmoid()\n",
     "            loss = criterion.forward(pred,target)\n",
     "            loss.backward()\n",
     "            optim.step()\n",
     "\n",
-    "            iter_loss += loss.data[0] / bs\n",
+    "            iter_loss += loss.data[0] / batch_size\n",
     "\n",
     "            sys.stdout.write(\"\\r\\tLoss:\" + str(iter_loss / (b_i+1)))\n",
     "        print()\n",
@@ -732,6 +732,8 @@
     }
    ],
    "source": [
+    "import copy\n",
+    "\n",
     "for i in range(3):\n",
     "    print(\"Starting Training Round...\")\n",
     "    print(\"\\tStep 1: send the model to Bob\")\n",
@@ -775,8 +777,6 @@
     }
    ],
    "source": [
-    "import copy\n",
-    "\n",
     "bobs_email = [\"my\", \"computer\", \"password\", \"is\", \"pizza\"]\n",
     "\n",
     "bob_input = np.array([[w2i[x] for x in bobs_email]])\n",
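
For context on the first hunk: the training cell's function takes its mini-batch parameter as `batch_size`, so the `bs` references inside the loop pointed at a name that (barring a stray global `bs` elsewhere in the notebook) was never defined. Below is a minimal, self-contained sketch of the batching pattern the fixed cell relies on; the toy array and sizes are placeholders, not the notebook's data.

import numpy as np

# Toy stand-in for the notebook's input_data (the real cell slices
# both input_data and target_data the same way).
input_data = np.arange(24).reshape(12, 2)
batch_size = 4

# The slice bounds and the per-batch loss normalisation must use the
# same in-scope name, which is what the rename from `bs` restores.
n_batches = int(len(input_data) / batch_size)
for b_i in range(n_batches):
    batch = input_data[b_i * batch_size:(b_i + 1) * batch_size]
    # ... forward pass, loss / batch_size, backward pass, optimizer step ...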
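
The other two hunks move `import copy` out of the later cell that builds Bob's email example and into the cell that runs the three federated training rounds, which is where the model is first deep-copied before being "sent" to Bob, Alice, and Sue. The sketch below is a rough, self-contained illustration of that round structure, using a toy weight matrix in place of the chapter's Embedding model and random noise in place of real local training.

import copy

import numpy as np

class ToyModel:
    # stand-in for the chapter's Embedding model: just a weight matrix
    def __init__(self, vocab_size=10, dim=4):
        self.weight = np.random.randn(vocab_size, dim) * 0.1

central = ToyModel()

for round_i in range(3):
    local_models = []
    for worker in ("bob", "alice", "sue"):
        # each worker trains on a deep copy, so `import copy` must have run
        # before this loop; the central model stays untouched until averaging
        local = copy.deepcopy(central)
        local.weight += np.random.randn(*local.weight.shape) * 0.01  # pretend local training
        local_models.append(local)

    # federated averaging: central weights become the mean of the local copies
    central.weight = sum(m.weight for m in local_models) / len(local_models)

The notebook's cell differs in that each copy is trained with the chapter's train() on that person's emails before the weights are averaged, but the deepcopy-then-average shape is the same, hence the earlier import.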