Skip to content

Commit 3e5a5f7

Browse files
committed
Colaboratory를 통해 생성됨
1 parent 11d0851 commit 3e5a5f7

File tree

1 file changed

+25
-25
lines changed

1 file changed

+25
-25
lines changed

GAN/pix2pix(2016).ipynb

+25-25
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@
66
"name": "pix2pix(2016).ipynb",
77
"provenance": [],
88
"machine_shape": "hm",
9-
"authorship_tag": "ABX9TyN2YQrXN7TVuSj3zNIUbCHW",
9+
"authorship_tag": "ABX9TyMt3J3nGrabI++eewLqG6Ia",
1010
"include_colab_link": true
1111
},
1212
"kernelspec": {
@@ -64,7 +64,7 @@
6464
"\n",
6565
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
6666
],
67-
"execution_count": 23,
67+
"execution_count": null,
6868
"outputs": []
6969
},
7070
{
@@ -88,7 +88,7 @@
8888
"source": [
8989
"!git clone https://github.com/mrzhu-cool/pix2pix-pytorch # 깃 클론"
9090
],
91-
"execution_count": 2,
91+
"execution_count": null,
9292
"outputs": [
9393
{
9494
"output_type": "stream",
@@ -110,7 +110,7 @@
110110
"source": [
111111
"!mkdir 'data' # 폴더 생성"
112112
],
113-
"execution_count": 3,
113+
"execution_count": null,
114114
"outputs": []
115115
},
116116
{
@@ -156,7 +156,7 @@
156156
" def __len__(self):\n",
157157
" return len(self.img_filenames)"
158158
],
159-
"execution_count": 69,
159+
"execution_count": null,
160160
"outputs": []
161161
},
162162
{
@@ -172,7 +172,7 @@
172172
" transforms.Resize((256,256))\n",
173173
"])"
174174
],
175-
"execution_count": 70,
175+
"execution_count": null,
176176
"outputs": []
177177
},
178178
{
@@ -185,7 +185,7 @@
185185
"path2img = '/content/data/facades/train'\n",
186186
"train_ds = FacadeDataset(path2img, transform=transform)"
187187
],
188-
"execution_count": 71,
188+
"execution_count": null,
189189
"outputs": []
190190
},
191191
{
@@ -209,7 +209,7 @@
209209
"plt.imshow(to_pil_image(0.5*b+0.5))\n",
210210
"plt.axis('off')"
211211
],
212-
"execution_count": 72,
212+
"execution_count": null,
213213
"outputs": [
214214
{
215215
"output_type": "execute_result",
@@ -247,7 +247,7 @@
247247
"# 데이터 로더 생성하기\n",
248248
"train_dl = DataLoader(train_ds, batch_size=32, shuffle=True)"
249249
],
250-
"execution_count": 73,
250+
"execution_count": null,
251251
"outputs": []
252252
},
253253
{
@@ -296,7 +296,7 @@
296296
"down_out = model(x)\n",
297297
"print(down_out.shape)"
298298
],
299-
"execution_count": 74,
299+
"execution_count": null,
300300
"outputs": [
301301
{
302302
"output_type": "stream",
@@ -343,7 +343,7 @@
343343
"out = model(x,down_out)\n",
344344
"print(out.shape)"
345345
],
346-
"execution_count": 75,
346+
"execution_count": null,
347347
"outputs": [
348348
{
349349
"output_type": "stream",
@@ -417,7 +417,7 @@
417417
"out = model(x)\n",
418418
"print(out.shape)"
419419
],
420-
"execution_count": 76,
420+
"execution_count": null,
421421
"outputs": [
422422
{
423423
"output_type": "stream",
@@ -459,7 +459,7 @@
459459
"out = model(x)\n",
460460
"print(out.shape)"
461461
],
462-
"execution_count": 77,
462+
"execution_count": null,
463463
"outputs": [
464464
{
465465
"output_type": "stream",
@@ -482,7 +482,7 @@
482482
"source": [
483483
"# Discriminator은 patch gan을 사용합니다.\n",
484484
"# Patch Gan: 이미지를 16x16의 패치로 분할하여 각 패치가 진짜인지 가짜인지 식별합니다.\n",
485-
"# low-frequency에서 정확도가 향상됩니다.\n",
485+
"# high-frequency에서 정확도가 향상됩니다.\n",
486486
"\n",
487487
"class Discriminator(nn.Module):\n",
488488
" def __init__(self, in_channels=3):\n",
@@ -510,7 +510,7 @@
510510
"out = model(x,x)\n",
511511
"print(out.shape)"
512512
],
513-
"execution_count": 78,
513+
"execution_count": null,
514514
"outputs": [
515515
{
516516
"output_type": "stream",
@@ -530,7 +530,7 @@
530530
"model_gen = GeneratorUNet().to(device)\n",
531531
"model_dis = Discriminator().to(device)"
532532
],
533-
"execution_count": 79,
533+
"execution_count": null,
534534
"outputs": []
535535
},
536536
{
@@ -550,7 +550,7 @@
550550
"model_gen.apply(initialize_weights);\n",
551551
"model_dis.apply(initialize_weights);"
552552
],
553-
"execution_count": 80,
553+
"execution_count": null,
554554
"outputs": []
555555
},
556556
{
@@ -587,7 +587,7 @@
587587
"opt_dis = optim.Adam(model_dis.parameters(),lr=lr,betas=(beta1,beta2))\n",
588588
"opt_gen = optim.Adam(model_gen.parameters(),lr=lr,betas=(beta1,beta2))"
589589
],
590-
"execution_count": 81,
590+
"execution_count": null,
591591
"outputs": []
592592
},
593593
{
@@ -656,7 +656,7 @@
656656
" if batch_count % 100 == 0:\n",
657657
" print('Epoch: %.0f, G_Loss: %.6f, D_Loss: %.6f, time: %.2f min' %(epoch, g_loss.item(), d_loss.item(), (time.time()-start_time)/60))"
658658
],
659-
"execution_count": 82,
659+
"execution_count": null,
660660
"outputs": [
661661
{
662662
"output_type": "stream",
@@ -700,7 +700,7 @@
700700
"plt.legend()\n",
701701
"plt.show()"
702702
],
703-
"execution_count": 83,
703+
"execution_count": null,
704704
"outputs": [
705705
{
706706
"output_type": "display_data",
@@ -732,7 +732,7 @@
732732
"torch.save(model_gen.state_dict(), path2weights_gen)\n",
733733
"torch.save(model_dis.state_dict(), path2weights_dis)"
734734
],
735-
"execution_count": 84,
735+
"execution_count": null,
736736
"outputs": []
737737
},
738738
{
@@ -758,7 +758,7 @@
758758
"weights = torch.load(path2weights_gen)\n",
759759
"model_gen.load_state_dict(weights)"
760760
],
761-
"execution_count": 85,
761+
"execution_count": null,
762762
"outputs": [
763763
{
764764
"output_type": "execute_result",
@@ -790,7 +790,7 @@
790790
" real_imgs = b\n",
791791
" break"
792792
],
793-
"execution_count": 86,
793+
"execution_count": null,
794794
"outputs": []
795795
},
796796
{
@@ -815,7 +815,7 @@
815815
" plt.imshow(to_pil_image(0.5*fake_imgs[ii]+0.5))\n",
816816
" plt.axis('off')\n"
817817
],
818-
"execution_count": 87,
818+
"execution_count": null,
819819
"outputs": [
820820
{
821821
"output_type": "display_data",
@@ -840,7 +840,7 @@
840840
"source": [
841841
""
842842
],
843-
"execution_count": 87,
843+
"execution_count": null,
844844
"outputs": []
845845
}
846846
]

0 commit comments

Comments (0)