
Commit 18e759e

code for fnns-d and fnns-de
committed · 1 parent 70e9275 · commit 18e759e

13 files changed: +1632 -0 lines changed

demo.ipynb  +267
@@ -0,0 +1,267 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# import relevant packages\n",
    "import numpy as np\n",
    "import torch\n",
    "import matplotlib.pyplot as plt\n",
    "from imageio import imread, imwrite\n",
    "from torch import nn\n",
    "import random\n",
    "import argparse\n",
    "from PIL import Image\n",
    "from skimage.metrics import peak_signal_noise_ratio\n",
    "from skimage.metrics import structural_similarity\n",
    "from steganogan import SteganoGAN\n",
    "\n",
    "import torch\n",
    "from torch.optim import LBFGS\n",
    "import torch.nn.functional as F\n",
    "\n",
    "# set seed\n",
    "seed = 11111\n",
    "np.random.seed(seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# set parameters\n",
    "# The mode can be random, pretrained-de or pretrained-d. Refer to the paper for details.\n",
    "mode = \"pretrained-d\"\n",
    "steps = 2000\n",
    "max_iter = 10\n",
    "alpha = 0.1\n",
    "eps = 0.3\n",
    "num_bits = 1\n",
    "\n",
    "# some pre-trained SteganoGAN models can be found here: https://drive.google.com/drive/folders/1-U2NDKUfqqI-Xd5IqT1nkymRQszAlubu?usp=sharing\n",
    "model_path = \"/home/vk352/FaceDetection/SteganoGAN/research/models/celeba_basic_1_1_mse10.steg\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using CUDA device\n"
     ]
    }
   ],
   "source": [
    "steganogan = SteganoGAN.load(path=model_path, cuda=True, verbose=True)\n",
    "input_im = \"/home/vk352/FaceDetection/datasets/div2k/val/512/0801.jpg\"\n",
    "output_im = \"steganographic.png\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Encoding completed.\n"
     ]
    }
   ],
   "source": [
    "inp_image = imread(input_im, pilmode='RGB')\n",
    "\n",
    "# you can add a custom target message here\n",
    "target = torch.bernoulli(torch.empty(1, num_bits, inp_image.shape[1], inp_image.shape[0]).uniform_(0, 1)).to('cuda')\n",
    "\n",
    "steganogan.encode(input_im, output_im, target)\n",
    "output = steganogan.decode(output_im)\n",
    "\n",
    "if mode == \"pretrained-de\":\n",
    "    image = output_im\n",
    "else:\n",
    "    image = input_im\n",
    "\n",
    "image = imread(image, pilmode='RGB') / 255.0\n",
    "image = torch.FloatTensor(image).permute(2, 1, 0).unsqueeze(0)\n",
    "image = image.to('cuda')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PSNR: 21.819463907593587\n",
      "SSIM: 0.84231546457805\n",
      "Initial error: 0.01287841796875\n"
     ]
    }
   ],
   "source": [
    "# initial statistics\n",
    "\n",
    "im1 = np.array(imread(input_im, pilmode='RGB')).astype(float)\n",
    "im2 = np.array(imread(output_im, pilmode='RGB')).astype(float)\n",
    "print(\"PSNR:\", peak_signal_noise_ratio(im1, im2, data_range=255))\n",
    "print(\"SSIM:\", structural_similarity(im1, im2, data_range=255, multichannel=True))\n",
    "err = ((target != output.float()).sum().item() + 0.0) / target.numel()\n",
    "print(\"Initial error:\", err)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Error: 0.12554550170898438\n",
      "Error: 0.036365509033203125\n",
      "Error: 0.0092620849609375\n",
      "Error: 0.002719879150390625\n",
      "Error: 0.0006561279296875\n",
      "Error: 0.031497955322265625\n",
      "Error: 0.00734710693359375\n",
      "Error: 0.001422882080078125\n",
      "Error: 0.000141143798828125\n",
      "Error: 7.62939453125e-06\n",
      "Error: 0.0\n",
      "Error: 3.814697265625e-06\n",
      "Error: 0.0\n",
      "Error: 7.62939453125e-06\n",
      "Error: 0.0\n",
      "Error: 0.0\n",
      "Error: 0.0\n",
      "Error: 0.0\n",
      "Error: 0.0\n",
      "Error: 0.0\n",
      "Error: 0.0\n",
      "Error: 0.0\n"
     ]
    }
   ],
   "source": [
    "# FNNS Optimization\n",
    "model = steganogan.decoder\n",
    "criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')\n",
    "\n",
    "out = model(image)\n",
    "target = target.to(out.device)\n",
    "\n",
    "count = 0\n",
    "\n",
    "adv_image = image.clone().detach()\n",
    "\n",
    "for i in range(steps // max_iter):\n",
    "    adv_image.requires_grad = True\n",
    "    optimizer = LBFGS([adv_image], lr=alpha, max_iter=max_iter)\n",
    "\n",
    "    def closure():\n",
    "        outputs = model(adv_image)\n",
    "        loss = criterion(outputs, target)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        return loss\n",
    "\n",
    "    optimizer.step(closure)\n",
    "    delta = torch.clamp(adv_image - image, min=-eps, max=eps)\n",
    "    adv_image = torch.clamp(image + delta, min=0, max=1).detach()\n",
    "\n",
    "    err = len(torch.nonzero((model(adv_image) > 0).float().view(-1) != target.view(-1))) / target.numel()\n",
    "    print(\"Error:\", err)\n",
    "    if err < 0.00001: eps = 0.7\n",
    "    if err == 0: count += 1; eps = 0.3\n",
    "    if count == 10: break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PSNR: 33.14091471588373\n",
      "SSIM: 0.9280028725210056\n",
      "Error: 0.0\n",
      "\n",
      "After writing to file and reading from file\n",
      "PSNR: 33.09258549548191\n",
      "SSIM: 0.9276992026437099\n",
      "Error: 0.0\n"
     ]
    }
   ],
   "source": [
    "# print final statistics\n",
    "\n",
    "print(\"PSNR:\", peak_signal_noise_ratio(np.array(imread(input_im, pilmode='RGB')).astype(float), (adv_image.squeeze().permute(2, 1, 0) * 255).detach().cpu().numpy(), data_range=255))\n",
    "print(\"SSIM:\", structural_similarity(np.array(imread(input_im, pilmode='RGB')).astype(float), (adv_image.squeeze().permute(2, 1, 0) * 255).detach().cpu().numpy(), data_range=255, multichannel=True))\n",
    "print(\"Error:\", err)\n",
    "lbfgsimg = (adv_image.cpu().squeeze().permute(2, 1, 0).numpy() * 255).astype(np.uint8)\n",
    "\n",
    "Image.fromarray(lbfgsimg).save(output_im)\n",
    "image_read = imread(output_im, pilmode='RGB') / 255.0\n",
    "image_read = torch.FloatTensor(image_read).permute(2, 1, 0).unsqueeze(0).to('cuda')\n",
    "\n",
    "print(\"\\nAfter writing to file and reading from file\")\n",
    "im1 = np.array(imread(input_im, pilmode='RGB')).astype(float)\n",
    "im2 = np.array(imread(output_im, pilmode='RGB')).astype(float)\n",
    "print(\"PSNR:\", peak_signal_noise_ratio(im1, im2, data_range=255))\n",
    "print(\"SSIM:\", structural_similarity(im1, im2, data_range=255, multichannel=True))\n",
    "print(\"Error:\", len(torch.nonzero((model(image_read) > 0).float().view(-1) != target.view(-1))) / target.numel())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "rnns_vk",
   "language": "python",
   "name": "rnns_vk"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
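
For readers of the diff, the core of demo.ipynb is the LBFGS perturbation loop in the optimization cell above. The sketch below restates that loop as a single routine; it is illustrative only and not part of this commit. The name fnns_optimize is made up, and it assumes decoder, image, and target are prepared as in the notebook (a SteganoGAN decoder, a (1, C, W, H) image tensor in [0, 1], and a binary target of matching spatial shape).

import torch
from torch.optim import LBFGS


def fnns_optimize(decoder, image, target, steps=2000, max_iter=10, alpha=0.1, eps=0.3):
    """Perturb image within an L-infinity ball of radius eps until decoder recovers target."""
    criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
    adv_image = image.clone().detach()

    for _ in range(steps // max_iter):
        adv_image.requires_grad = True
        optimizer = LBFGS([adv_image], lr=alpha, max_iter=max_iter)

        def closure():
            # BCE-with-logits between the decoder output and the target bit tensor
            loss = criterion(decoder(adv_image), target)
            optimizer.zero_grad()
            loss.backward()
            return loss

        optimizer.step(closure)

        # project the perturbation back into the eps-ball and the valid pixel range
        delta = torch.clamp(adv_image - image, min=-eps, max=eps)
        adv_image = torch.clamp(image + delta, min=0, max=1).detach()

        # bit error rate of the decoded message; stop once every bit is recovered
        with torch.no_grad():
            err = ((decoder(adv_image) > 0).float() != target).float().mean().item()
        if err == 0:
            break

    return adv_image

Unlike the notebook cell, this sketch simply stops at the first zero-error iterate instead of using the notebook's eps = 0.7 / count-based schedule.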

steganogan/__init__.py  +10
@@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
"""Top-level package for SteganoGAN."""

__author__ = """MIT Data To AI Lab"""
__email__ = '[email protected]'
__version__ = '0.1.4-dev'

from steganogan.models import SteganoGAN

__all__ = ('SteganoGAN', )
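
Because of the re-export above, the model class can be imported directly from the package, which is the form demo.ipynb uses:

# package-level import enabled by the re-export in steganogan/__init__.py
from steganogan import SteganoGAN  # same class as steganogan.models.SteganoGAN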

steganogan/cli.py  +95
@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-

import argparse
import warnings

from torch.serialization import SourceChangeWarning

from steganogan.models import SteganoGAN

warnings.filterwarnings('ignore', category=SourceChangeWarning)


def _get_steganogan(args):

    steganogan_kwargs = {
        'cuda': not args.cpu,
        'verbose': args.verbose
    }

    if args.path:
        steganogan_kwargs['path'] = args.path
    else:
        steganogan_kwargs['architecture'] = args.architecture

    return SteganoGAN.load(**steganogan_kwargs)


def _encode(args):
    """Load a pretrained model and use it to encode the message into the cover image."""
    steganogan = _get_steganogan(args)
    steganogan.encode(args.cover, args.output, args.message)


def _decode(args):
    try:
        steganogan = _get_steganogan(args)
        message = steganogan.decode(args.image)

        if args.verbose:
            print('Message successfully decoded:')

        print(message)

    except Exception as e:
        print('ERROR: {}'.format(e))
        import traceback
        traceback.print_exc()


def _get_parser():

    # Parent Parser - Shared options
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument('-v', '--verbose', action='store_true', help='Be verbose')
    group = parent.add_mutually_exclusive_group()
    group.add_argument('-a', '--architecture', default='dense',
                       choices={'basic', 'dense', 'residual'},
                       help='Model architecture. Use the same one for both encoding and decoding')

    group.add_argument('-p', '--path', help='Load a pretrained model from a given path.')
    parent.add_argument('--cpu', action='store_true',
                        help='Force CPU usage even if CUDA is available')

    parser = argparse.ArgumentParser(description='SteganoGAN Command Line Interface')

    subparsers = parser.add_subparsers(title='action', help='Action to perform')
    parser.set_defaults(action=None)

    # Encode Parser
    encode = subparsers.add_parser('encode', parents=[parent],
                                   help='Hide a message into a steganographic image')
    encode.set_defaults(action=_encode)
    encode.add_argument('-o', '--output', default='output.png',
                        help='Path and name to save the output image')
    encode.add_argument('cover', help='Path to the image to use as cover')
    encode.add_argument('message', help='Message to encode')

    # Decode Parser
    decode = subparsers.add_parser('decode', parents=[parent],
                                   help='Read a message from a steganographic image')
    decode.set_defaults(action=_decode)
    decode.add_argument('image', help='Path to the image with the hidden message')

    return parser


def main():
    parser = _get_parser()
    args = parser.parse_args()

    if not args.action:
        parser.print_help()
        parser.exit()

    args.action(args)
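
The parser built by _get_parser() can also be driven from Python rather than the shell, which is convenient for quick tests. A minimal sketch, assuming a cover image and message of your own; the file names below are placeholders, not files shipped with this commit:

# illustrative only: invoke the CLI actions programmatically
from steganogan.cli import _get_parser

parser = _get_parser()
args = parser.parse_args(['encode', '--output', 'stego.png', 'cover.png', 'hello world'])
args.action(args)  # dispatches to _encode, which loads a model and hides the message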
