diff --git a/LICENSE/index.html b/LICENSE/index.html
deleted file mode 100644
index 98cf89a2..00000000
--- a/LICENSE/index.html
+++ /dev/null
@@ -1,2 +0,0 @@
-
-LICENSE · Augmentor.jl

LICENSE

The Augmentor.jl package is licensed under the MIT "Expat" License:

Copyright (c) 2017: Christof Stocker.

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

diff --git a/assets/ConvertEltype.png b/assets/ConvertEltype.png deleted file mode 100644 index 8b7f43a3..00000000 Binary files a/assets/ConvertEltype.png and /dev/null differ diff --git a/assets/Crop.png b/assets/Crop.png deleted file mode 100644 index 16bef546..00000000 Binary files a/assets/Crop.png and /dev/null differ diff --git a/assets/CropRatio.png b/assets/CropRatio.png deleted file mode 100644 index 92d1d014..00000000 Binary files a/assets/CropRatio.png and /dev/null differ diff --git a/assets/CropSize.png b/assets/CropSize.png deleted file mode 100644 index 880dc245..00000000 Binary files a/assets/CropSize.png and /dev/null differ diff --git a/assets/ElasticDistortion.gif b/assets/ElasticDistortion.gif deleted file mode 100644 index b139bea1..00000000 Binary files a/assets/ElasticDistortion.gif and /dev/null differ diff --git a/assets/ElasticDistortion2.gif b/assets/ElasticDistortion2.gif deleted file mode 100644 index 064c6bb2..00000000 Binary files a/assets/ElasticDistortion2.gif and /dev/null differ diff --git a/assets/FlipX.png b/assets/FlipX.png deleted file mode 100644 index 0e64ac04..00000000 Binary files a/assets/FlipX.png and /dev/null differ diff --git a/assets/FlipY.png b/assets/FlipY.png deleted file mode 100644 index fb1da196..00000000 Binary files a/assets/FlipY.png and /dev/null differ diff --git a/assets/RCropRatio.gif b/assets/RCropRatio.gif deleted file mode 100644 index c1f8a790..00000000 Binary files a/assets/RCropRatio.gif and /dev/null differ diff --git a/assets/Resize.png b/assets/Resize.png deleted file mode 100644 index 1863f665..00000000 Binary files a/assets/Resize.png and /dev/null differ diff --git a/assets/Rotate.gif b/assets/Rotate.gif deleted file mode 100644 index d632a47c..00000000 Binary files a/assets/Rotate.gif and /dev/null differ diff --git a/assets/Rotate.png b/assets/Rotate.png deleted file mode 100644 index dfc4e322..00000000 Binary files a/assets/Rotate.png and /dev/null differ diff --git a/assets/Rotate180.png b/assets/Rotate180.png deleted file mode 100644 index 8d3bd5cc..00000000 Binary files a/assets/Rotate180.png and /dev/null differ diff --git a/assets/Rotate270.png b/assets/Rotate270.png deleted file mode 100644 index 1c86b8d7..00000000 Binary files a/assets/Rotate270.png and /dev/null differ diff --git a/assets/Rotate90.png b/assets/Rotate90.png deleted file mode 100644 index 9822c023..00000000 Binary files a/assets/Rotate90.png and /dev/null differ diff --git a/assets/Scale.gif b/assets/Scale.gif deleted file mode 100644 index 6fd59112..00000000 Binary files a/assets/Scale.gif and /dev/null differ diff --git a/assets/Scale.png b/assets/Scale.png deleted file mode 100644 index 41dea91b..00000000 Binary files a/assets/Scale.png and /dev/null differ diff --git a/assets/Scale2.png b/assets/Scale2.png deleted file mode 100644 index 31404f02..00000000 Binary files a/assets/Scale2.png and /dev/null differ diff --git a/assets/ShearX.gif b/assets/ShearX.gif deleted file mode 100644 index ae4e5d61..00000000 Binary files a/assets/ShearX.gif and /dev/null differ diff --git a/assets/ShearX.png b/assets/ShearX.png deleted file mode 100644 index 3e5f364d..00000000 Binary files a/assets/ShearX.png and /dev/null differ diff --git a/assets/ShearY.gif b/assets/ShearY.gif deleted file mode 100644 index e1cc2da3..00000000 Binary files a/assets/ShearY.gif and /dev/null differ diff --git a/assets/ShearY.png b/assets/ShearY.png deleted file mode 100644 index f862d247..00000000 Binary files a/assets/ShearY.png and /dev/null differ diff --git 
a/assets/Zoom.gif b/assets/Zoom.gif deleted file mode 100644 index bc7a1f1c..00000000 Binary files a/assets/Zoom.gif and /dev/null differ diff --git a/assets/Zoom.png b/assets/Zoom.png deleted file mode 100644 index ea3c11f3..00000000 Binary files a/assets/Zoom.png and /dev/null differ diff --git a/assets/arrow.svg b/assets/arrow.svg deleted file mode 100644 index ee2798d3..00000000 --- a/assets/arrow.svg +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - image/svg+xml - - - - - - - - - diff --git a/assets/bg_isic_in.png b/assets/bg_isic_in.png deleted file mode 100644 index 567fde0d..00000000 Binary files a/assets/bg_isic_in.png and /dev/null differ diff --git a/assets/bg_isic_out.png b/assets/bg_isic_out.png deleted file mode 100644 index b7cf6bde..00000000 Binary files a/assets/bg_isic_out.png and /dev/null differ diff --git a/assets/bg_mnist_in.png b/assets/bg_mnist_in.png deleted file mode 100644 index 82ad4c5a..00000000 Binary files a/assets/bg_mnist_in.png and /dev/null differ diff --git a/assets/bg_mnist_out.png b/assets/bg_mnist_out.png deleted file mode 100644 index 32638b2d..00000000 Binary files a/assets/bg_mnist_out.png and /dev/null differ diff --git a/assets/big_pattern.png b/assets/big_pattern.png deleted file mode 100644 index be8d255e..00000000 Binary files a/assets/big_pattern.png and /dev/null differ diff --git a/assets/cropn1.png b/assets/cropn1.png deleted file mode 100644 index 1a6954dc..00000000 Binary files a/assets/cropn1.png and /dev/null differ diff --git a/assets/cropn2.png b/assets/cropn2.png deleted file mode 100644 index a695991d..00000000 Binary files a/assets/cropn2.png and /dev/null differ diff --git a/assets/documenter.css b/assets/documenter.css deleted file mode 100644 index 26c8166f..00000000 --- a/assets/documenter.css +++ /dev/null @@ -1,573 +0,0 @@ -/* - * The default CSS style for Documenter.jl generated sites - * - * Heavily inspired by the Julia Sphinx theme - * https://github.com/JuliaLang/JuliaDoc - * which extends the sphinx_rtd_theme - * https://github.com/snide/sphinx_rtd_theme - * - * Part of Documenter.jl - * https://github.com/JuliaDocs/Documenter.jl - * - * License: MIT - */ - -/* fonts */ -body, input { - font-family: 'Lato', 'Helvetica Neue', Arial, sans-serif; - font-size: 16px; - color: #222; - text-rendering: optimizeLegibility; -} - -pre, code, kbd { - font-family: 'Roboto Mono', Monaco, courier, monospace; - font-size: 0.90em; -} - -pre code { - font-size: 1em; -} - -a { - color: #2980b9; - text-decoration: none; -} - -a:hover { - color: #3091d1; -} - -a:visited { - color: #9b59b6; -} - -body { - line-height: 1.5; -} - -h1 { - font-size: 1.75em; -} - -/* Unless the

the is very first thing on the page (i.e. the second element - * in the
, * after the
, we add some additional styling to it - * to make it stand out a bit more. This way we get a reasonable fallback if CSS3 - * selectors are not supported in the browser. - */ -article > h1:not(:nth-child(2)) { - margin: 2.5em 0 0; - padding-bottom: 0.30em; - border-bottom: 1px solid #e5e5e5; -} -h2 { - font-size: 1.50em; - margin: 2.3em 0 0; - padding-bottom: 0.25em; - border-bottom: 1px solid #e5e5e5; -} -h3 { - font-size: 1.25em; - margin: 2.0em 0 0; -} -h4 { font-size: 1.15em; } -h5 { font-size: 1.10em; } -h6 { font-size: 1em; } - -h4, h5, h6 { - margin-top: 1.5em; - margin-bottom: 1em; -} - -img { - max-width: 100%; -} - -table { - border-collapse: collapse; - margin: 1em 0; -} - -th, td { - border: 1px solid #e1e4e5; - padding: 0.5em 1em; -} - -th { - border-bottom-width: 2px; -} - -tr:nth-child(even) { - background-color: #f3f6f6; -} - -hr { - border: 0; - border-top: 1px solid #e5e5e5; -} - -/* Inline code and code blocks */ - -code { - padding: 0.1em; - background-color: rgba(0,0,0,.04); - border-radius: 3px; -} - -pre { - background-color: #f5f5f5; - border: 1px solid #dddddd; - border-radius: 3px; - padding: 0.5em; - overflow: auto; -} - -pre code { - padding: 0; - background-color: initial; -} - -kbd { - font-size: 0.70em; - display: inline-block; - padding: 0.1em 0.5em 0.4em 0.5em; - line-height: 1.0em; - color: #444d56; - vertical-align: middle; - background-color: #fafbfc; - border: solid 1px #c6cbd1; - border-bottom-color: #959da5; - border-radius: 3px; - box-shadow: inset 0 -1px 0 #959da5; -} - -/* Headers in admonitions and docstrings */ -.admonition h1, -article section.docstring h1 { - font-size: 1.25em; -} - -.admonition h2, -article section.docstring h2 { - font-size: 1.10em; -} - -.admonition h3, -.admonition h4, -.admonition h5, -.admonition h6, -article section.docstring h3, -article section.docstring h4, -article section.docstring h5, -article section.docstring h6 { - font-size: 1em; -} - -/* Navigation */ -nav.toc { - position: fixed; - top: 0; - left: 0; - bottom: 0; - width: 20em; - overflow-y: auto; - padding: 1em 0; - background-color: #fcfcfc; - box-shadow: inset -14px 0px 5px -12px rgb(210,210,210); -} - -nav.toc .logo { - margin: 0 auto; - display: block; - max-height: 6em; - max-width: 18em; -} - -nav.toc h1 { - text-align: center; - margin-top: .57em; - margin-bottom: 0; -} - -nav.toc select { - display: block; - height: 2em; - padding: 0 1.6em 0 1em; - min-width: 7em; - max-width: 90%; - max-width: calc(100% - 5em); - margin: 0 auto; - font-size: .83em; - border: 1px solid #c9c9c9; - border-radius: 1em; - - /* TODO: doesn't seem to be centered on Safari */ - text-align: center; - text-align-last: center; - - appearance: none; - -moz-appearance: none; - -webkit-appearance: none; - - background: white url("arrow.svg"); - background-size: 1.155em; - background-repeat: no-repeat; - background-position: right; -} - -nav.toc select:hover { - border: 1px solid #a0a0a0; -} - -nav.toc select option { - text-align: center; -} - -nav.toc input { - display: block; - height: 2em; - width: 90%; - width: calc(100% - 5em); - margin: 1.2em auto; - padding: 0 1em; - border: 1px solid #c9c9c9; - border-radius: 1em; - font-size: .83em; -} - -nav.toc > ul * { - margin: 0; -} - -nav.toc ul { - color: #404040; - padding: 0; - list-style: none; -} - -nav.toc ul .toctext { - color: inherit; - display: block; -} - -nav.toc ul a:hover { - color: #fcfcfc; - background-color: #4e4a4a; -} - -nav.toc ul.internal a { - color: inherit; - display: block; -} - -nav.toc ul.internal a:hover 
{ - background-color: #d6d6d6; -} - -nav.toc ul.internal { - background-color: #e3e3e3; - box-shadow: inset -14px 0px 5px -12px rgb(210,210,210); - list-style: none; -} - -nav.toc ul.internal li.toplevel { - border-top: 1px solid #909090; - font-weight: bold; -} - -nav.toc ul.internal li.toplevel:first-child { - border-top: none; -} - -nav.toc .toctext { - padding-top: 0.3em; - padding-bottom: 0.3em; - padding-right: 1em; -} - -nav.toc ul .toctext { - padding-left: 1em; -} - -nav.toc ul ul .toctext { - padding-left: 2em; -} - -nav.toc ul ul ul .toctext { - padding-left: 3em; -} - -nav.toc li.current > .toctext { - border-top: 1px solid #c9c9c9; - border-bottom: 1px solid #c9c9c9; - color: #404040; - font-weight: bold; - background-color: white; -} - -article { - margin-left: 20em; - min-width: 20em; - max-width: 48em; - padding: 2em; -} - -article > header {} - -article > header div#topbar { - display: none; -} - -article > header nav ul { - display: inline-block; - list-style: none; - margin: 0; - padding: 0; -} - -article > header nav li { - display: inline-block; - padding-right: 0.2em; -} - -article > header nav li:before { - content: "»"; - padding-right: 0.2em; -} - -article > header .edit-page { - float: right; -} - -article > footer {} - -article > footer a.prev { - float: left; -} -article > footer a.next { - float: right; -} - -article > footer a .direction:after { - content: ": "; -} - -article hr { - margin: 1em 0; -} - -article section.docstring { - border: 1px solid #ddd; - margin: 0.5em 0; - padding: 0.5em; - border-radius: 3px; -} - -article section.docstring .docstring-header { - margin-bottom: 1em; -} - -article section.docstring .docstring-binding { - color: #333; - font-weight: bold; -} - -article section.docstring .docstring-category { - font-style: italic; -} - -article section.docstring a.source-link { - display: block; - font-weight: bold; -} - -.nav-anchor, -.nav-anchor:hover, -.nav-anchor:visited { - color: #333; -} - -/* - * Admonitions - * - * Colors (title, body) - * warning: #f0b37e #ffedcc (orange) - * note: #6ab0de #e7f2fa (blue) - * tip: #1abc9c #dbfaf4 (green) -*/ -.admonition { - border-radius: 3px; - background-color: #eeeeee; -} - -.admonition-title { - border-radius: 3px 3px 0 0; - background-color: #9b9b9b; - padding: 0.15em 0.5em; -} - -.admonition-text { - padding: 0.5em; -} - -.admonition-text > :first-child { - margin-top: 0; -} - -.admonition-text > :last-child { - margin-bottom: 0; -} - -.admonition > .admonition-title:before { - font-family: "FontAwesome"; - margin-right: 5px; - content: "\f06a"; -} - -.admonition.warning > .admonition-title { - background-color: #f0b37e; -} - -.admonition.warning { - background-color: #ffedcc; -} - -.admonition.note > .admonition-title { - background-color: #6ab0de; -} - -.admonition.note { - background-color: #e7f2fa; -} - -.admonition.tip > .admonition-title { - background-color: #1abc9c; -} - -.admonition.tip { - background-color: #dbfaf4; -} - - -/* footnotes */ -.footnote { - padding-left: 0.8em; - border-left: 2px solid #ccc; -} - -/* Search page */ -#search-results .category { - font-size: smaller; -} - -/* Overriding the block style of highligh.js. - * We have to override the padding and the background-color, since we style this - * part ourselves. Specifically, we style the
 surrounding the , while
- * highlight.js applies the .hljs style directly to the  tag.
- */
-.hljs {
-    background-color: transparent;
-    padding: 0;
-}
-
-@media only screen and (max-width: 768px) {
-    nav.toc {
-        position: fixed;
-        overflow-y: scroll;
-        width: 16em;
-        left: -16em;
-        -webkit-overflow-scrolling: touch;
-        -webkit-transition-property: left; /* Safari */
-        -webkit-transition-duration: 0.3s; /* Safari */
-        transition-property: left;
-        transition-duration: 0.3s;
-        -webkit-transition-timing-function: ease-out; /* Safari */
-        transition-timing-function: ease-out;
-        z-index: 2;
-    }
-
-    nav.toc.show {
-        left: 0;
-    }
-
-    article {
-        margin-left: 0;
-        padding: 3em 0.9em 0 0.9em; /* top right bottom left */
-        overflow-wrap: break-word;
-    }
-
-    article > header {
-        position: fixed;
-        left: 0;
-        z-index: 1;
-    }
-
-    article > header nav, hr {
-        display: none;
-    }
-
-    article > header div#topbar {
-        display: block; /* is mobile */
-        position: fixed;
-        width: 100%;
-        height: 1.5em;
-        padding-top: 1em;
-        padding-bottom: 1em;
-        background-color: #fcfcfc;
-        box-shadow: 0 1px 3px rgba(0,0,0,.26);
-        top: 0;
-        -webkit-transition-property: top; /* Safari */
-        -webkit-transition-duration: 0.3s; /* Safari */
-        transition-property: top;
-        transition-duration: 0.3s;
-    }
-
-    article > header div#topbar.headroom--unpinned.headroom--not-top.headroom--not-bottom {
-        top: -4em;
-        -webkit-transition-property: top; /* Safari */
-        -webkit-transition-duration: 0.7s; /* Safari */
-        transition-property: top;
-        transition-duration: 0.7s;
-    }
-
-    article > header div#topbar span {
-        position: fixed;
-        width: 80%;
-        height: 1.5em;
-        margin-top: -0.1em;
-        margin-left: 0.9em;
-        font-size: 1.2em;
-        overflow: hidden;
-    }
-
-    article > header div#topbar a.fa-bars {
-        float: right;
-        padding: 0.6em;
-        margin-top: -0.6em;
-        margin-right: 0.3em;
-        font-size: 1.5em;
-    }
-
-    article > header div#topbar a.fa-bars:visited {
-        color: #3091d1;
-    }
-
-    article table {
-        overflow-x: auto;
-        display: block;
-    }
-
-    article div.MathJax_Display {
-        overflow: scroll;
-    }
-
-    article span.MathJax {
-        overflow: hidden;
-    }
-}
-
-@media only screen and (max-width: 320px) {
-    body {
-        font-size: 15px;
-    }
-}
diff --git a/assets/documenter.js b/assets/documenter.js
deleted file mode 100644
index 5d31622f..00000000
--- a/assets/documenter.js
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Part of Documenter.jl
- *     https://github.com/JuliaDocs/Documenter.jl
- *
- * License: MIT
- */
-
-requirejs.config({
-    paths: {
-        'jquery': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min',
-        'jqueryui': 'https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.0/jquery-ui.min',
-        'headroom': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.9.3/headroom.min',
-        'mathjax': 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS_HTML',
-        'highlight': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min',
-        'highlight-julia': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/languages/julia.min',
-        'highlight-julia-repl': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/languages/julia-repl.min',
-    },
-    shim: {
-        'mathjax' : {
-            exports: "MathJax"
-        },
-        'highlight-julia': ['highlight'],
-        'highlight-julia-repl': ['highlight'],
-    }
-});
-
-// Load MathJax
-require(['mathjax'], function(MathJax) {
-    MathJax.Hub.Config({
-      "tex2jax": {
-        inlineMath: [['$','$'], ['\\(','\\)']],
-        processEscapes: true
-      }
-    });
-    MathJax.Hub.Config({
-      config: ["MMLorHTML.js"],
-      jax: [
-        "input/TeX",
-        "output/HTML-CSS",
-        "output/NativeMML"
-      ],
-      extensions: [
-        "MathMenu.js",
-        "MathZoom.js",
-        "TeX/AMSmath.js",
-        "TeX/AMSsymbols.js",
-        "TeX/autobold.js",
-        "TeX/autoload-all.js"
-      ]
-    });
-    MathJax.Hub.Config({
-      TeX: { equationNumbers: { autoNumber: "AMS" } }
-    });
-})
-
-require(['jquery', 'highlight', 'highlight-julia', 'highlight-julia-repl'], function($, hljs) {
-    $(document).ready(function() {
-        hljs.initHighlighting();
-    })
-
-})
-
-// update the version selector with info from the siteinfo.js and ../versions.js files
-require(['jquery'], function($) {
-    $(document).ready(function() {
-        var version_selector = $("#version-selector");
-
-        // add the current version to the selector based on siteinfo.js, but only if the selector is empty
-        if (typeof DOCUMENTER_CURRENT_VERSION !== 'undefined' && $('#version-selector > option').length == 0) {
-            var option = $("");
-            version_selector.append(option);
-        }
-
-        if (typeof DOC_VERSIONS !== 'undefined') {
-            var existing_versions = $('#version-selector > option');
-            var existing_versions_texts = existing_versions.map(function(i,x){return x.text});
-            DOC_VERSIONS.forEach(function(each) {
-                var version_url = documenterBaseURL + "/../" + each;
-                var existing_id = $.inArray(each, existing_versions_texts);
-                // if not already in the version selector, add it as a new option,
-                // otherwise update the old option with the URL and enable it
-                if (existing_id == -1) {
-                    var option = $("");
-                    version_selector.append(option);
-                } else {
-                    var option = existing_versions[existing_id];
-                    option.value = version_url;
-                    option.disabled = false;
-                }
-            });
-        }
-
-        // only show the version selector if the selector has been populated
-        if ($('#version-selector > option').length > 0) {
-            version_selector.css("visibility", "visible");
-        }
-    })
-
-})
-
-// mobile
-require(['jquery', 'headroom'], function($, Headroom) {
-    $(document).ready(function() {
-        var navtoc = $("nav.toc");
-        $("nav.toc li.current a.toctext").click(function() {
-            navtoc.toggleClass('show');
-        });
-        $("article > header div#topbar a.fa-bars").click(function(ev) {
-            ev.preventDefault();
-            navtoc.toggleClass('show');
-            if (navtoc.hasClass('show')) {
-                var title = $("article > header div#topbar span").text();
-                $("nav.toc ul li a:contains('" + title + "')").focus();
-            }
-        });
-        $("article#docs").bind('click', function(ev) {
-            if ($(ev.target).is('div#topbar a.fa-bars')) {
-                return;
-            }
-            if (navtoc.hasClass('show')) {
-                navtoc.removeClass('show');
-            }
-        });
-        if ($("article > header div#topbar").css('display') == 'block') {
-            var headroom = new Headroom(document.querySelector("article > header div#topbar"), {"tolerance": {"up": 10, "down": 10}});
-            headroom.init();
-        }
-    })
-})
diff --git a/assets/favicon.ico b/assets/favicon.ico
deleted file mode 100644
index de4ffaa6..00000000
Binary files a/assets/favicon.ico and /dev/null differ
diff --git a/assets/idx_mnist_1.gif b/assets/idx_mnist_1.gif
deleted file mode 100644
index cb07ea9a..00000000
Binary files a/assets/idx_mnist_1.gif and /dev/null differ
diff --git a/assets/idx_mnist_10.gif b/assets/idx_mnist_10.gif
deleted file mode 100644
index 30a82291..00000000
Binary files a/assets/idx_mnist_10.gif and /dev/null differ
diff --git a/assets/idx_mnist_11.gif b/assets/idx_mnist_11.gif
deleted file mode 100644
index 16fb5d00..00000000
Binary files a/assets/idx_mnist_11.gif and /dev/null differ
diff --git a/assets/idx_mnist_12.gif b/assets/idx_mnist_12.gif
deleted file mode 100644
index 5c27777a..00000000
Binary files a/assets/idx_mnist_12.gif and /dev/null differ
diff --git a/assets/idx_mnist_13.gif b/assets/idx_mnist_13.gif
deleted file mode 100644
index e756647d..00000000
Binary files a/assets/idx_mnist_13.gif and /dev/null differ
diff --git a/assets/idx_mnist_14.gif b/assets/idx_mnist_14.gif
deleted file mode 100644
index fa31ee8b..00000000
Binary files a/assets/idx_mnist_14.gif and /dev/null differ
diff --git a/assets/idx_mnist_15.gif b/assets/idx_mnist_15.gif
deleted file mode 100644
index d3951161..00000000
Binary files a/assets/idx_mnist_15.gif and /dev/null differ
diff --git a/assets/idx_mnist_16.gif b/assets/idx_mnist_16.gif
deleted file mode 100644
index a83be728..00000000
Binary files a/assets/idx_mnist_16.gif and /dev/null differ
diff --git a/assets/idx_mnist_17.gif b/assets/idx_mnist_17.gif
deleted file mode 100644
index 3c33ead0..00000000
Binary files a/assets/idx_mnist_17.gif and /dev/null differ
diff --git a/assets/idx_mnist_18.gif b/assets/idx_mnist_18.gif
deleted file mode 100644
index 884c97bb..00000000
Binary files a/assets/idx_mnist_18.gif and /dev/null differ
diff --git a/assets/idx_mnist_19.gif b/assets/idx_mnist_19.gif
deleted file mode 100644
index 0b302664..00000000
Binary files a/assets/idx_mnist_19.gif and /dev/null differ
diff --git a/assets/idx_mnist_2.gif b/assets/idx_mnist_2.gif
deleted file mode 100644
index f9d66338..00000000
Binary files a/assets/idx_mnist_2.gif and /dev/null differ
diff --git a/assets/idx_mnist_20.gif b/assets/idx_mnist_20.gif
deleted file mode 100644
index 34a86408..00000000
Binary files a/assets/idx_mnist_20.gif and /dev/null differ
diff --git a/assets/idx_mnist_21.gif b/assets/idx_mnist_21.gif
deleted file mode 100644
index 38f575f5..00000000
Binary files a/assets/idx_mnist_21.gif and /dev/null differ
diff --git a/assets/idx_mnist_22.gif b/assets/idx_mnist_22.gif
deleted file mode 100644
index b688055e..00000000
Binary files a/assets/idx_mnist_22.gif and /dev/null differ
diff --git a/assets/idx_mnist_23.gif b/assets/idx_mnist_23.gif
deleted file mode 100644
index c405a6a3..00000000
Binary files a/assets/idx_mnist_23.gif and /dev/null differ
diff --git a/assets/idx_mnist_24.gif b/assets/idx_mnist_24.gif
deleted file mode 100644
index 3c29701d..00000000
Binary files a/assets/idx_mnist_24.gif and /dev/null differ
diff --git a/assets/idx_mnist_3.gif b/assets/idx_mnist_3.gif
deleted file mode 100644
index fdb8b2c3..00000000
Binary files a/assets/idx_mnist_3.gif and /dev/null differ
diff --git a/assets/idx_mnist_4.gif b/assets/idx_mnist_4.gif
deleted file mode 100644
index 9db30015..00000000
Binary files a/assets/idx_mnist_4.gif and /dev/null differ
diff --git a/assets/idx_mnist_5.gif b/assets/idx_mnist_5.gif
deleted file mode 100644
index 9d7fff1d..00000000
Binary files a/assets/idx_mnist_5.gif and /dev/null differ
diff --git a/assets/idx_mnist_6.gif b/assets/idx_mnist_6.gif
deleted file mode 100644
index 6f922f42..00000000
Binary files a/assets/idx_mnist_6.gif and /dev/null differ
diff --git a/assets/idx_mnist_7.gif b/assets/idx_mnist_7.gif
deleted file mode 100644
index 72edef1f..00000000
Binary files a/assets/idx_mnist_7.gif and /dev/null differ
diff --git a/assets/idx_mnist_8.gif b/assets/idx_mnist_8.gif
deleted file mode 100644
index c00eb527..00000000
Binary files a/assets/idx_mnist_8.gif and /dev/null differ
diff --git a/assets/idx_mnist_9.gif b/assets/idx_mnist_9.gif
deleted file mode 100644
index 802efd64..00000000
Binary files a/assets/idx_mnist_9.gif and /dev/null differ
diff --git a/assets/isic_in.png b/assets/isic_in.png
deleted file mode 100644
index ab159ce2..00000000
Binary files a/assets/isic_in.png and /dev/null differ
diff --git a/assets/isic_out.gif b/assets/isic_out.gif
deleted file mode 100644
index 5aa2dc62..00000000
Binary files a/assets/isic_out.gif and /dev/null differ
diff --git a/assets/logo.png b/assets/logo.png
deleted file mode 100644
index e742103c..00000000
Binary files a/assets/logo.png and /dev/null differ
diff --git a/assets/search.js b/assets/search.js
deleted file mode 100644
index 4dfee82a..00000000
--- a/assets/search.js
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Part of Documenter.jl
- *     https://github.com/JuliaDocs/Documenter.jl
- *
- * License: MIT
- */
-
-// parseUri 1.2.2
-// (c) Steven Levithan 
-// MIT License
-function parseUri (str) {
-	var	o   = parseUri.options,
-		m   = o.parser[o.strictMode ? "strict" : "loose"].exec(str),
-		uri = {},
-		i   = 14;
-
-	while (i--) uri[o.key[i]] = m[i] || "";
-
-	uri[o.q.name] = {};
-	uri[o.key[12]].replace(o.q.parser, function ($0, $1, $2) {
-		if ($1) uri[o.q.name][$1] = $2;
-	});
-
-	return uri;
-};
-parseUri.options = {
-	strictMode: false,
-	key: ["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"],
-	q:   {
-		name:   "queryKey",
-		parser: /(?:^|&)([^&=]*)=?([^&]*)/g
-	},
-	parser: {
-		strict: /^(?:([^:\/?#]+):)?(?:\/\/((?:(([^:@]*)(?::([^:@]*))?)?@)?([^:\/?#]*)(?::(\d*))?))?((((?:[^?#\/]*\/)*)([^?#]*))(?:\?([^#]*))?(?:#(.*))?)/,
-		loose:  /^(?:(?![^:@]+:[^:@\/]*@)([^:\/?#.]+):)?(?:\/\/)?((?:(([^:@]*)(?::([^:@]*))?)?@)?([^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/
-	}
-};
-
-requirejs.config({
-    paths: {
-        'jquery': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min',
-        'lunr': 'https://cdnjs.cloudflare.com/ajax/libs/lunr.js/2.1.3/lunr.min',
-        'lodash': 'https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.4/lodash.min',
-    }
-});
-
-var currentScript = document.currentScript;
-
-require(["jquery", "lunr", "lodash"], function($, lunr, _) {
-    $("#search-form").submit(function(e) {
-        e.preventDefault()
-    })
-
-    // list below is the lunr 2.1.3 list minus the intersect with names(Base)
-    // (all, any, get, in, is, which) and (do, else, for, let, where, while, with)
-    // ideally we'd just filter the original list but it's not available as a variable
-    lunr.stopWordFilter = lunr.generateStopWordFilter([
-        'a',
-        'able',
-        'about',
-        'across',
-        'after',
-        'almost',
-        'also',
-        'am',
-        'among',
-        'an',
-        'and',
-        'are',
-        'as',
-        'at',
-        'be',
-        'because',
-        'been',
-        'but',
-        'by',
-        'can',
-        'cannot',
-        'could',
-        'dear',
-        'did',
-        'does',
-        'either',
-        'ever',
-        'every',
-        'from',
-        'got',
-        'had',
-        'has',
-        'have',
-        'he',
-        'her',
-        'hers',
-        'him',
-        'his',
-        'how',
-        'however',
-        'i',
-        'if',
-        'into',
-        'it',
-        'its',
-        'just',
-        'least',
-        'like',
-        'likely',
-        'may',
-        'me',
-        'might',
-        'most',
-        'must',
-        'my',
-        'neither',
-        'no',
-        'nor',
-        'not',
-        'of',
-        'off',
-        'often',
-        'on',
-        'only',
-        'or',
-        'other',
-        'our',
-        'own',
-        'rather',
-        'said',
-        'say',
-        'says',
-        'she',
-        'should',
-        'since',
-        'so',
-        'some',
-        'than',
-        'that',
-        'the',
-        'their',
-        'them',
-        'then',
-        'there',
-        'these',
-        'they',
-        'this',
-        'tis',
-        'to',
-        'too',
-        'twas',
-        'us',
-        'wants',
-        'was',
-        'we',
-        'were',
-        'what',
-        'when',
-        'who',
-        'whom',
-        'why',
-        'will',
-        'would',
-        'yet',
-        'you',
-        'your'
-        ])
-
-    // add . as a separator, because otherwise "title": "Documenter.Anchors.add!"
-    // would not find anything if searching for "add!", only for the entire qualification
-    lunr.tokenizer.separator = /[\s\-\.]+/
-
-    // custom trimmer that doesn't strip @ and !, which are used in julia macro and function names
-    lunr.trimmer = function (token) {
-        return token.update(function (s) {
-            return s.replace(/^[^a-zA-Z0-9@!]+/, '').replace(/[^a-zA-Z0-9@!]+$/, '')
-        })
-    }
-
-    lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'juliaStopWordFilter')
-    lunr.Pipeline.registerFunction(lunr.trimmer, 'juliaTrimmer')
-
-    var index = lunr(function () {
-        this.ref('location')
-        this.field('title')
-        this.field('text')
-        documenterSearchIndex['docs'].forEach(function(e) {
-            this.add(e)
-        }, this)
-    })
-    var store = {}
-
-    documenterSearchIndex['docs'].forEach(function(e) {
-        store[e.location] = {title: e.title, category: e.category}
-    })
-
-    $(function(){
-        function update_search(querystring) {
-            tokens = lunr.tokenizer(querystring)
-            results = index.query(function (q) {
-                tokens.forEach(function (t) {
-                    q.term(t.toString(), {
-                        fields: ["title"],
-                        boost: 10,
-                        usePipeline: false,
-                        editDistance: 2,
-                        wildcard: lunr.Query.wildcard.NONE
-                    })
-                    q.term(t.toString(), {
-                        fields: ["text"],
-                        boost: 1,
-                        usePipeline: true,
-                        editDistance: 2,
-                        wildcard: lunr.Query.wildcard.NONE
-                    })
-                })
-            })
-            $('#search-info').text("Number of results: " + results.length)
-            $('#search-results').empty()
-            results.forEach(function(result) {
-                data = store[result.ref]
-                link = $('')
-                link.text(data.title)
-                link.attr('href', documenterBaseURL+'/'+result.ref)
-                cat = $('('+data.category+')')
-                li = $('
  • ').append(link).append(" ").append(cat) - $('#search-results').append(li) - }) - } - - function update_search_box() { - querystring = $('#search-query').val() - update_search(querystring) - } - - $('#search-query').keyup(_.debounce(update_search_box, 250)) - $('#search-query').change(update_search_box) - - search_query_uri = parseUri(window.location).queryKey["q"] - if(search_query_uri !== undefined) { - search_query = decodeURIComponent(search_query_uri.replace(/\+/g, '%20')) - $("#search-query").val(search_query) - } - update_search_box(); - }) -}) diff --git a/assets/style.css b/assets/style.css deleted file mode 100644 index f68cd983..00000000 --- a/assets/style.css +++ /dev/null @@ -1,25 +0,0 @@ -article > h1:not(:nth-child(2)) { - margin: 1.0em 0 0; -} -h1 { margin: 1.0em 0 0; } -h2 { margin: 1.0em 0 0; } -h3 { margin: 1.0em 0 0; } -article hr { - margin-top: 3em; - border-top-width: 1px; -} -article section.docstring { - margin: 0; - border: 0; - padding: 0; - margin-top: 1em; -} -article table { - text-align: center; -} -article p { - text-align: justify; -} -article > header .edit-page { - margin-left: 1em; -} diff --git a/assets/testpattern.png b/assets/testpattern.png deleted file mode 100644 index 6e90e071..00000000 Binary files a/assets/testpattern.png and /dev/null differ diff --git a/assets/tiny_ConvertEltype.png b/assets/tiny_ConvertEltype.png deleted file mode 100644 index d7dd2467..00000000 Binary files a/assets/tiny_ConvertEltype.png and /dev/null differ diff --git a/assets/tiny_Crop.png b/assets/tiny_Crop.png deleted file mode 100644 index a834f5a3..00000000 Binary files a/assets/tiny_Crop.png and /dev/null differ diff --git a/assets/tiny_CropNative.png b/assets/tiny_CropNative.png deleted file mode 100644 index 619e9f87..00000000 Binary files a/assets/tiny_CropNative.png and /dev/null differ diff --git a/assets/tiny_CropRatio.png b/assets/tiny_CropRatio.png deleted file mode 100644 index 406ab393..00000000 Binary files a/assets/tiny_CropRatio.png and /dev/null differ diff --git a/assets/tiny_CropSize.png b/assets/tiny_CropSize.png deleted file mode 100644 index 7e7d8705..00000000 Binary files a/assets/tiny_CropSize.png and /dev/null differ diff --git a/assets/tiny_ED1.png b/assets/tiny_ED1.png deleted file mode 100644 index a2eef292..00000000 Binary files a/assets/tiny_ED1.png and /dev/null differ diff --git a/assets/tiny_ED2.png b/assets/tiny_ED2.png deleted file mode 100644 index 6bf43b85..00000000 Binary files a/assets/tiny_ED2.png and /dev/null differ diff --git a/assets/tiny_FlipX.png b/assets/tiny_FlipX.png deleted file mode 100644 index a6e5cc03..00000000 Binary files a/assets/tiny_FlipX.png and /dev/null differ diff --git a/assets/tiny_FlipY.png b/assets/tiny_FlipY.png deleted file mode 100644 index 2705e498..00000000 Binary files a/assets/tiny_FlipY.png and /dev/null differ diff --git a/assets/tiny_RCropRatio.png b/assets/tiny_RCropRatio.png deleted file mode 100644 index 8c4879b3..00000000 Binary files a/assets/tiny_RCropRatio.png and /dev/null differ diff --git a/assets/tiny_Resize.png b/assets/tiny_Resize.png deleted file mode 100644 index e5fa30c0..00000000 Binary files a/assets/tiny_Resize.png and /dev/null differ diff --git a/assets/tiny_Rotate.png b/assets/tiny_Rotate.png deleted file mode 100644 index 09b72ff4..00000000 Binary files a/assets/tiny_Rotate.png and /dev/null differ diff --git a/assets/tiny_Rotate180.png b/assets/tiny_Rotate180.png deleted file mode 100644 index 8b1b406f..00000000 Binary files a/assets/tiny_Rotate180.png and /dev/null 
differ diff --git a/assets/tiny_Rotate270.png b/assets/tiny_Rotate270.png deleted file mode 100644 index ff8c9351..00000000 Binary files a/assets/tiny_Rotate270.png and /dev/null differ diff --git a/assets/tiny_Rotate90.png b/assets/tiny_Rotate90.png deleted file mode 100644 index 83777f2b..00000000 Binary files a/assets/tiny_Rotate90.png and /dev/null differ diff --git a/assets/tiny_Scale.png b/assets/tiny_Scale.png deleted file mode 100644 index 7f8a4861..00000000 Binary files a/assets/tiny_Scale.png and /dev/null differ diff --git a/assets/tiny_ShearX.png b/assets/tiny_ShearX.png deleted file mode 100644 index 807abc4a..00000000 Binary files a/assets/tiny_ShearX.png and /dev/null differ diff --git a/assets/tiny_ShearY.png b/assets/tiny_ShearY.png deleted file mode 100644 index 576a880b..00000000 Binary files a/assets/tiny_ShearY.png and /dev/null differ diff --git a/assets/tiny_Zoom.png b/assets/tiny_Zoom.png deleted file mode 100644 index 64b7eab6..00000000 Binary files a/assets/tiny_Zoom.png and /dev/null differ diff --git a/assets/tiny_pattern.png b/assets/tiny_pattern.png deleted file mode 100644 index c142aaa2..00000000 Binary files a/assets/tiny_pattern.png and /dev/null differ diff --git a/background/index.html b/background/index.html deleted file mode 100644 index f80d4faf..00000000 --- a/background/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -Background and Motivation · Augmentor.jl

    Background and Motivation

    In this section we will discuss the concept of image augmentation in general. In particular we will introduce some terminology and useful definitions.

    What is Image Augmentation?

    The term data augmentation is commonly used to describe the process of repeatedly applying various transformations to some dataset, with the hope that the output (i.e. the newly generated observations) biases the model towards learning better features. Depending on the structure and semantics of the data, coming up with such transformations can be a challenge in itself.

    Images are a special class of data that exhibit some interesting structural properties. For example, the dimensions of an image (i.e. its pixels) have a spatial relationship to each other. As such, a lot of commonly used augmentation strategies for image data revolve around affine transformations, such as translations or rotations. Because images are such a popular and special case of data, they deserve their own sub-category of data augmentation, which we will unsurprisingly refer to as image augmentation.
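
    To make this concrete, the sketch below composes a few of the affine operations that Augmentor provides into a single pipeline. The |> composition, augment, and the operation names are part of Augmentor's interface; the parameter ranges and the use of the bundled testpattern() image are illustrative assumptions rather than tuned values.

    using Augmentor
    img = testpattern()                 # Augmentor's built-in example image
    # a purely affine pipeline: small random rotations, shears, and rescaling
    pl = Rotate(-10:10) |> ShearX(-5:5) |> Scale(0.9:0.05:1.1)
    img_new = augment(img, pl)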

    The general idea is the following: if we want our model to generalize well, then we should design the learning process in such a way as to bias the model towards learning these transformation-equivariant properties. One way to do this is via the design of the model itself, which, for example, was the idea behind convolutional neural networks. An orthogonal approach to bias the model to learn about this equivariance - and the focus of this package - is to use label-preserving transformations.

    Label-preserving Transformations

    Before attempting to train a model using some augmentation pipeline, it's a good idea to invest some time in deciding on an appropriate set of transformations to choose from. Some of these transformations also have parameters to tune, and we should make sure to settle on a decent set of values for those.

    What constitutes "decent" depends on the dataset. In general, we want the augmented images to be fairly dissimilar to the originals. However, we need to be careful that the augmented images still visually represent the same concept (and thus the same label). If a pipeline only produces output images that have this property, we call that pipeline label-preserving.
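
    As a rough sketch of what such a pipeline looks like in Augmentor, the example below chooses one of several candidate operations at random for every image it is applied to. Whether this particular combination is label-preserving, and which parameter values count as "decent", has to be judged against the concrete dataset; the operations and numbers here are placeholders.

    using Augmentor
    pl = Either(FlipX(), FlipY(), NoOp()) |>   # pick one of these at random
         Rotate(-15:15) |>                     # then rotate by a small random angle
         CropSize(64, 64)                      # and crop back to a fixed size
    img_new = augment(testpattern(), pl)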

    Example: MNIST Handwritten Digits

    Consider the following example from the MNIST database of handwritten digits [MNIST1998]. Our input image clearly represents its associated label "6". If we were to use the transformation Rotate180 in our augmentation pipeline for this type of image, we could end up with the situation depicted by the image on the right side.

    using Augmentor, MLDatasets
    input_img  = MNIST.convert2image(MNIST.traintensor(19))
    output_img = augment(input_img, Rotate180())

    [Figure: the input digit (input_img) on the left, its Rotate180 output (output_img) on the right]

    To a human, this newly transformed image clearly represents the label "9", and not "6" like the original image did. In image augmentation, however, the assumption is that the output of the pipeline has the same label as the input. That means that in this example we would tell our model that the correct answer for the image on the right side is "6", which is clearly undesirable.

    Thus, for the MNIST dataset, the transformation Rotate180 is not label-preserving and should not be used for augmentation.
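
    For MNIST one would instead reach for milder transformations that keep each digit recognizable. The sketch below is one such candidate pipeline; ElasticDistortion, Rotate, and CropSize are Augmentor operations, but the specific values are assumptions meant to illustrate the idea, not tuned recommendations.

    using Augmentor
    mnist_pl = ElasticDistortion(6, scale=0.3, border=true) |>  # mild local deformations
               Rotate(-10:10) |>                                # small rotations only
               CropSize(28, 28)                                 # keep the 28x28 input size
    # output_img = augment(input_img, mnist_pl)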

    [MNIST1998]

    LeCun, Yann; Cortes, Corinna; Burges, Christopher J.C. "The MNIST database of handwritten digits". Website. 1998.

    Example: ISIC Skin Lesions

    On the other hand, the exact same transformation could very well be label-preserving for other types of images. Let us take a look at a different set of image data; this time from the medical domain.

    The International Skin Imaging Collaboration [ISIC] hosts a large collection of publicly available and labeled skin lesion images. A subset of that data was used in the 2016 ISBI challenge [ISBI2016], where one subtask was lesion classification.

    Let's consider the following input image on the left side. It shows a photo of a skin lesion that was taken from above. By applying the Rotate180 operation to the input image, we end up with a transformed version shown on the right side.

    using Augmentor, ISICArchive
    input_img  = get(ImageThumbnailRequest(id = "5592ac599fc3c13155a57a85"))
    output_img = augment(input_img, Rotate180())

    [Figure: the input photo (input_img) on the left, its Rotate180 output (output_img) on the right]

    After looking at both images, one could argue that the orientation of the camera is somewhat arbitrary as long as it points at the lesion from an approximately orthogonal angle. Thus, for the ISIC dataset, the transformation Rotate180 could be considered label-preserving and could very well be tried for augmentation. Of course, this does not guarantee that it will improve training time or model accuracy, but the point is that it is unlikely to hurt.
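
    Building on that observation, a plausible starting point for this dataset would combine rotations and flips, both of which merely change the (arbitrary) camera orientation. The following is only a sketch under that assumption; the cropping and resizing values are made up for illustration.

    using Augmentor
    isic_pl = Either(Rotate90(), Rotate180(), Rotate270(), NoOp()) |>  # arbitrary orientation
              Either(FlipX(), FlipY(), NoOp()) |>                      # mirroring is also plausible
              RCropRatio(1) |>                                         # random square crop
              Resize(128, 128)                                         # fixed network input size
    # output_img = augment(input_img, isic_pl)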

    [ISIC]

    https://isic-archive.com/

    [ISBI2016]

    Gutman, David; Codella, Noel C. F.; Celebi, Emre; Helba, Brian; Marchetti, Michael; Mishra, Nabin; Halpern, Allan. "Skin Lesion Analysis toward Melanoma Detection: A Challenge at the International Symposium on Biomedical Imaging (ISBI) 2016, hosted by the International Skin Imaging Collaboration (ISIC)". eprint arXiv:1605.01397. 2016.

    diff --git a/generated/mnist_1.png b/generated/mnist_1.png deleted file mode 100644 index 34ad3143..00000000 Binary files a/generated/mnist_1.png and /dev/null differ diff --git a/generated/mnist_elastic.ipynb b/generated/mnist_elastic.ipynb deleted file mode 100644 index bdd079c5..00000000 --- a/generated/mnist_elastic.ipynb +++ /dev/null @@ -1,585 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# [MNIST: Elastic Distortions](@id elastic)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this example we are going to use Augmentor on the famous\n", - "**MNIST database of handwritten digits** [MNIST1998] to\n", - "reproduce the elastic distortions discussed in [SIMARD2003].\n", - "It may be interesting to point out, that the way Augmentor\n", - "implements distortions is a little different to how it is\n", - "described by the authors of the paper.\n", - "This is for a couple of reasons, most notably that we want the\n", - "parameters for our deformations to be independent of the size\n", - "of image it is applied on. As a consequence the\n", - "parameter-numbers specified in the paper are not 1-to-1\n", - "transferable to Augmentor." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If the effects are sensible for the dataset, then applying\n", - "elastic distortions can be a really effective way to improve\n", - "the generalization ability of the network.\n", - "That said, our implementation of [`ElasticDistortion`](@ref)\n", - "has a lot of possible parameters to choose from. To that end,\n", - "we will introduce a simple strategy for interactively\n", - "exploring the parameter space on our dataset of interest." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Loading the MNIST Trainingset" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In order to access and visualize the MNIST images we employ\n", - "the help of two additional Julia packages. In the interest of\n", - "time and space we will not go into great detail about their\n", - "functionality. Feel free to click on their respective names to\n", - "find out more information about the utility they can provide.\n", - "\n", - "- [Images.jl](https://github.com/JuliaImages/Images.jl) will\n", - " provide us with the necessary tools for working with image\n", - " data in Julia.\n", - "\n", - "- [MLDatasets.jl](https://github.com/JuliaML/MLDatasets.jl)\n", - " has an MNIST submodule that offers a convenience interface\n", - " to read the MNIST database." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The function `MNIST.traintensor` returns the MNIST training\n", - "images corresponding to the given indices as a\n", - "multi-dimensional array. These images are stored in the native\n", - "horizontal-major memory layout as a single floating point\n", - "array, where all values are scaled to be between 0.0 and 1.0." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(train_tensor) = \"28×28×60000 Array{N0f8,3}\"\n" - ] - } - ], - "source": [ - "using Images, MLDatasets\n", - "train_tensor = MNIST.traintensor()\n", - "@show summary(train_tensor);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This horizontal-major format is the standard way of utilizing\n", - "this dataset for training machine learning models.\n", - "In this tutorial, however, we are more interested in working\n", - "with the MNIST images as actual Julia images in vertical-major\n", - "layout, and as black digits on white background." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can convert the \"tensor\" to a `Colorant` array using the\n", - "provided function `MNIST.convert2image`.\n", - "This way, Julia knows we are dealing with image data and can\n", - "tell programming environments such as Juypter how to visualize\n", - "it. If you are working in the terminal you may want to use the\n", - "package [ImageInTerminal.jl](https://github.com/JuliaImages/ImageInTerminal.jl)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAHAAAABwCAAAAADji6uXAAAABGdBTUEAALGPC/xhBQAAAAJiS0dEAP+Hj8y/AAAAB3RJTUUH4gcJBioDqp9ikwAAAZxJREFUaN7t2r0rRXEcx/Hb9ZASsioGmVhkUMqiKDaFwWSQQVZFpNgsJv4ByoiMBhmZFBkUi/K0SEomT+9POXW7nTqXuudBn9dyb93ueS/3d87vfM/NfcUs56CDDjrooIMOOpiC4DueCqxgDsO4wzhyqMEyHEw+eIMrbGIKY8iHaMEI9L4OvTiCg8kGT9GIfIRKbGHnxzEuEXZMB+MNamG3ISzSgyFogdcj6sfnYDJB2cMkNhDEuvAKfX4BnRAcTG9QXvAJHVjBbZQacDAdwcAsFOzDBxzMXlCLXTFFD+Bg9oJyDV1wtfGdwDp0UnAwO0HZRQOCC/Iq7uFgdoJyjgEE0WncwsHsBOUZuhGtgKL9cDBbwUA1FNRr2DDIwXQGz7CEQQSLvxNRG2QHkw9qaDeDJhQOiqqgQVHU9x1MLviANbSieNDXjX2UchwH4w8+4hDtCBvSalP8mwGDg/EFNWAfRdiQXQ+yNLh9Q6khB+MLnkAPHptRHKrFAoIB+184WP6g/gRQGOnAPBahm5e/hhyML1huDjrooIP/MPgNqMMZJ8UsgboAAAAASUVORK5CYII=", - "text/plain": [ - "28×28 Array{Gray{N0f8},2}:\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " ⋮ ⋱ \n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", 
- " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "train_images = MNIST.convert2image(train_tensor)\n", - "img_1 = train_images[:,:,1] # show first image" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Visualizing the Effects" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Before applying an operation (or pipeline of operations) on\n", - "some dataset to train a network, we strongly recommend\n", - "investing some time in selecting a decent set of hyper\n", - "parameters for the operation(s). A useful tool for tasks like\n", - "this is the package [Interact.jl](https://github.com/JuliaGizmos/Interact.jl).\n", - "We will use this package to define a number of widgets for\n", - "controlling the parameters to our operation." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that while the code below only focuses on configuring\n", - "the parameters of a single operation, specifically\n", - "[`ElasticDistortion`](@ref), it could also be adapted to tweak\n", - "a whole pipeline. Take a look at the corresponding section in\n", - "[High-level Interface](@ref pipeline) for more information\n", - "on how to define and use a pipeline." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
    \n", - " \n", - " \n", - "
    " - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "30a10ff6-6764-4863-a6c9-618c2b1ed400", - "version_major": 2, - "version_minor": 0 - } - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [], - "text/plain": [ - "Interact.Checkbox(1: \"input\" = true Bool , \"unpaused\", true)" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "1.460603952407837" - ] - }, - "metadata": { - "comm_id": "6db94edf-d86b-490a-8471-75cf846d4943", - "reactive": true - }, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "ff738bd4-b2b6-413f-90e1-3034415e4593", - "version_major": 2, - "version_minor": 0 - } - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [], - "text/plain": [ - "Interact.Options{:SelectionSlider,Int64}(4: \"input-2\" = 50 Int64 , \"image_index\", 50, \"50\", 50, Interact.OptionDict(DataStructures.OrderedDict(\"1\"=>1,\"2\"=>2,\"3\"=>3,\"4\"=>4,\"5\"=>5,\"6\"=>6,\"7\"=>7,\"8\"=>8,\"9\"=>9,\"10\"=>10…), Dict(68=>\"68\",2=>\"2\",89=>\"89\",11=>\"11\",39=>\"39\",46=>\"46\",85=>\"85\",25=>\"25\",55=>\"55\",42=>\"42\"…)), Any[], Any[], true, \"horizontal\", true)" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "e7fa56a2-c97b-42d7-9739-1800fd051324", - "version_major": 2, - "version_minor": 0 - } - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [], - "text/plain": [ - "Interact.Options{:SelectionSlider,Int64}(6: \"input-3\" = 11 Int64 , \"grid_size\", 11, \"11\", 9, Interact.OptionDict(DataStructures.OrderedDict(\"3\"=>3,\"4\"=>4,\"5\"=>5,\"6\"=>6,\"7\"=>7,\"8\"=>8,\"9\"=>9,\"10\"=>10,\"11\"=>11,\"12\"=>12…), Dict(16=>\"16\",11=>\"11\",7=>\"7\",9=>\"9\",10=>\"10\",19=>\"19\",17=>\"17\",8=>\"8\",6=>\"6\",4=>\"4\"…)), Any[], Any[], true, \"horizontal\", true)" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "1b3af9c5-01b2-44e0-9816-5b7bd36fd401", - "version_major": 2, - "version_minor": 0 - } - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [], - "text/plain": [ - "Interact.Options{:SelectionSlider,Float64}(8: \"input-4\" = 0.3 Float64 , \"scale\", 0.3, \"0.3\", 3, Interact.OptionDict(DataStructures.OrderedDict(\"0.1\"=>0.1,\"0.2\"=>0.2,\"0.3\"=>0.3,\"0.4\"=>0.4,\"0.5\"=>0.5), Dict(0.4=>\"0.4\",0.3=>\"0.3\",0.5=>\"0.5\",0.2=>\"0.2\",0.1=>\"0.1\")), Any[], Any[], true, \"horizontal\", true)" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "cc5ccba3-5085-4cc3-9e1f-4d33c18ae997", - "version_major": 2, - "version_minor": 0 - } - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [], - "text/plain": [ - "Interact.Options{:SelectionSlider,Int64}(10: \"input-5\" = 3 Int64 , \"sigma\", 3, \"3\", 3, Interact.OptionDict(DataStructures.OrderedDict(\"1\"=>1,\"2\"=>2,\"3\"=>3,\"4\"=>4,\"5\"=>5), Dict(4=>\"4\",2=>\"2\",3=>\"3\",5=>\"5\",1=>\"1\")), Any[], Any[], true, \"horizontal\", true)" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": 
"77b30ccc-f7fa-4101-b0c7-577fc6a54c94", - "version_major": 2, - "version_minor": 0 - } - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [], - "text/plain": [ - "Interact.Options{:SelectionSlider,Int64}(12: \"input-6\" = 3 Int64 , \"iterations\", 3, \"3\", 3, Interact.OptionDict(DataStructures.OrderedDict(\"1\"=>1,\"2\"=>2,\"3\"=>3,\"4\"=>4,\"5\"=>5,\"6\"=>6), Dict(4=>\"4\",2=>\"2\",3=>\"3\",5=>\"5\",6=>\"6\",1=>\"1\")), Any[], Any[], true, \"horizontal\", true)" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "347187f2-b9a2-42f8-b73d-afa4130b038a", - "version_major": 2, - "version_minor": 0 - } - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [], - "text/plain": [ - "Interact.Checkbox(14: \"input-7\" = true Bool , \"free_border\", true)" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAHAAAABwCAAAAADji6uXAAAABGdBTUEAALGPC/xhBQAAAAJiS0dEAP+Hj8y/AAAAB3RJTUUH4gcJBioRWSYT2wAAAmFJREFUaN7t2kuITmEcgPFhXIaQxi3URCJLNcYKpSzYiJSNhVlMWChKrBg1NUlZTEopLGTBwkaRbNzKAkUi7MYtJbdi3C/jecpbp+kb34y+OeeM/r/NTN9M5zk17znve94zdb05q4tgBCMYwQhG8D8NfsITvMVPRLDcwTe4jp1YgpkYiRE4iMu4gcfwRCJYnuB7XMFerMZUjEL9H0YbMQPLcQAPEcH8g1/Q9+L9inNYionIhrLB9P04bMY9RDD/oH/oz8h+9hxe6NMwFgbHowmtWIlJMObPPbGz6EEE8w+exitkP/uAi1gHD74CW7EBzfCCN+aJLMQZfERvBREc+mB/vHlfxSUcwR4sxhikCdgTOoRn6O84ESwu+AsOnlPYAifgdLOei+1w4eTv/e04ESwu6AL4GDx4Cs3DNngST1HtQSaCxQY7kQZKWjCtx1040VYbLBEsPrgLBqfACdfJdhnacR/fEcFyB51Uj8LJdwfmYwJcELuYcuHkzaFmG0MRrHnQA2UHxnGsxRw4gFrgA+g3RLCcwUoeYT/Sw4ybQS8RweET1At4E3DgOEn3fZiNYLmDLpxuYhYcNG40+MIkguUP+gLkNXxh4ubeaBg9CTcfIliOYDf2wc2e2/gBP7+GDrgwTi+61AZvAv+8qRDBmgbfwZgPLx6sC+exCZORHkjdUHejfSNuodoCKoL5BX0o2Y0UnI3pcDPWidavC7AKh+ENeyCbQhHML+hFfgeLkH0p6SBxY93QBTzAQCIRzD8oL2I3CtagAW7EnsBgAxEsLij/icCbQKWXxxEcHsGhEMGa+w148aNJP+CmgAAAAABJRU5ErkJggg==", - "text/plain": [ - "28×28 Array{Gray{N0f8},2}:\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " ⋮ ⋱ \n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) 
Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) … Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)\n", - " Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0) Gray{N0f8}(1.0)" - ] - }, - "execution_count": 3, - "metadata": { - "comm_id": "0e4826c5-8b0f-4661-b3f1-6b1b1706af31", - "reactive": true - }, - "output_type": "execute_result" - } - ], - "source": [ - "# These two package will provide us with the capabilities\n", - "# to perform interactive visualisations in a jupyter notebook\n", - "using Augmentor, Interact, Reactive\n", - "\n", - "# The manipulate macro will turn the parameters of the\n", - "# loop into interactive widgets.\n", - "@manipulate for\n", - " unpaused = true,\n", - " ticks = fpswhen(signal(unpaused), 5.),\n", - " image_index = 1:100,\n", - " grid_size = 3:20,\n", - " scale = .1:.1:.5,\n", - " sigma = 1:5,\n", - " iterations = 1:6,\n", - " free_border = true\n", - " op = ElasticDistortion(grid_size, grid_size, # equal width & height\n", - " sigma = sigma,\n", - " scale = scale,\n", - " iter = iterations,\n", - " border = free_border)\n", - " augment(train_images[:, :, image_index], op)\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Congratulations! With just a few simple lines of code, you\n", - "created a simple interactive tool to visualize your image\n", - "augmentation pipeline. Once you found a set of parameters that\n", - "you think are appropriate for your dataset you can go ahead\n", - "and train your model." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## References\n", - "\n", - "**MNIST1998**: LeCun, Yan, Corinna Cortes, Christopher J.C. Burges. [\"The MNIST database of handwritten digits\"](http://yann.lecun.com/exdb/mnist/) Website. 1998.\n", - "\n", - "**SIMARD2003**: Simard, Patrice Y., David Steinkraus, and John C. Platt. [\"Best practices for convolutional neural networks applied to visual document analysis.\"](https://www.microsoft.com/en-us/research/publication/best-practices-for-convolutional-neural-networks-applied-to-visual-document-analysis/) ICDAR. Vol. 3. 2003." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Julia 0.6.3-pre", - "language": "julia", - "name": "julia-0.6" - }, - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "0.6.4" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/generated/mnist_elastic/index.html b/generated/mnist_elastic/index.html deleted file mode 100644 index 776e9b58..00000000 --- a/generated/mnist_elastic/index.html +++ /dev/null @@ -1,27 +0,0 @@ - -MNIST: Elastic Distortions · Augmentor.jl

    MNIST: Elastic Distortions


    In this example we are going to use Augmentor on the famous MNIST database of handwritten digits [MNIST1998] to reproduce the elastic distortions discussed in [SIMARD2003]. It is worth pointing out that the way Augmentor implements distortions differs a little from how the authors of the paper describe it. This is for a couple of reasons, most notably that we want the parameters for our deformations to be independent of the size of the image they are applied to. As a consequence, the parameter values specified in the paper are not transferable 1-to-1 to Augmentor.

    If the effects are sensible for the dataset, then applying elastic distortions can be a really effective way to improve the generalization ability of the network. That said, our implementation of ElasticDistortion has a lot of possible parameters to choose from. To that end, we will introduce a simple strategy for interactively exploring the parameter space on our dataset of interest.

    Note

    This tutorial was designed to be performed in a Jupyter notebook. You can find a link to the Jupyter version of this tutorial in the top right corner of this page.

    Loading the MNIST Trainingset

    In order to access and visualize the MNIST images we employ the help of two additional Julia packages. In the interest of time and space we will not go into great detail about their functionality. Feel free to click on their respective names to find out more about what they provide.

    • Images.jl will provide us with the necessary tools for working with image data in Julia.

    • MLDatasets.jl has an MNIST submodule that offers a convenience interface to read the MNIST database.

    The function MNIST.traintensor returns the MNIST training images corresponding to the given indices as a multi-dimensional array. These images are stored in the native horizontal-major memory layout as a single array whose values are scaled to be between 0.0 and 1.0. By default the element type is the fixed-point type N0f8; a floating point element type such as Float32 can be requested explicitly.

    using Images, MLDatasets
    -train_tensor = MNIST.traintensor()
    -@show summary(train_tensor);
    summary(train_tensor) = "28×28×60000 Array{N0f8,3}"
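
    As mentioned above, MNIST.traintensor also accepts indices (and optionally an element type). The following is just a small illustrative sketch of that form, not part of the original page:

    using MLDatasets
    # request the first 10 training images as a 28×28×10 Float32 array
    small_tensor = MNIST.traintensor(Float32, 1:10)
    @show summary(small_tensor);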

    This horizontal-major format is the standard way of utilizing this dataset for training machine learning models. In this tutorial, however, we are more interested in working with the MNIST images as actual Julia images in vertical-major layout, and as black digits on a white background.

    We can convert the "tensor" to a Colorant array using the provided function MNIST.convert2image. This way, Julia knows we are dealing with image data and can tell programming environments such as Jupyter how to visualize it. If you are working in the terminal you may want to use the package ImageInTerminal.jl.

    train_images = MNIST.convert2image(train_tensor)
    -img_1 = train_images[:,:,1] # show first image

    first image

    Visualizing the Effects

    Before applying an operation (or a pipeline of operations) to some dataset to train a network, we strongly recommend investing some time in selecting a decent set of hyperparameters for the operation(s). A useful tool for tasks like this is the package Interact.jl. We will use this package to define a number of widgets for controlling the parameters of our operation.

    Note that while the code below only focuses on configuring the parameters of a single operation, specifically ElasticDistortion, it could also be adapted to tweak a whole pipeline; a minimal sketch of that is shown after the visualisation below. Take a look at the corresponding section in High-level Interface for more information on how to define and use a pipeline.

    # These two packages will provide us with the capabilities
    -# to perform interactive visualisations in a Jupyter notebook
    -using Augmentor, Interact, Reactive
    -
    -# The manipulate macro will turn the parameters of the
    -# loop into interactive widgets.
    -@manipulate for
    -        unpaused = true,
    -        ticks = fpswhen(signal(unpaused), 5.),
    -        image_index = 1:100,
    -        grid_size = 3:20,
    -        scale = .1:.1:.5,
    -        sigma = 1:5,
    -        iterations = 1:6,
    -        free_border = true
    -    op = ElasticDistortion(grid_size, grid_size, # equal width & height
    -                           sigma = sigma,
    -                           scale = scale,
    -                           iter = iterations,
    -                           border = free_border)
    -    augment(train_images[:, :, image_index], op)
    -end
    -nothing # hide

    Executing the code above in a Jupyter notebook will result in the following interactive visualisation. You can now use the sliders to investigate the effects that different parameters have on the MNIST training images.

    Tip

    You should always use your training set for this kind of visualisation (not the test set!). Otherwise you are likely to end up with overly optimistic (i.e. biased) results.

    interact
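
    As mentioned above, the same widget-based approach can also be used to tweak a whole pipeline instead of a single operation. The following is merely a minimal sketch of that idea and not part of the original tutorial; it assumes the same packages are loaded, reuses train_images from before, and the chosen operations and parameter ranges are purely illustrative.

    using Augmentor, Interact, Reactive

    @manipulate for
            image_index = 1:100,
            max_rotation = 5:5:45,
            grid_size = 3:20,
            scale = .1:.1:.5
        # build a small pipeline from the current widget values
        pl = Rotate(-max_rotation:max_rotation) |>
             CropSize(28, 28) |>
             ElasticDistortion(grid_size, grid_size, scale = scale)
        augment(train_images[:, :, image_index], pl)
    end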

    Congratulations! With just a few lines of code, you created a simple interactive tool to visualize your image augmentation pipeline. Once you have found a set of parameters that you think are appropriate for your dataset, you can go ahead and train your model.
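
    For example, once you have settled on concrete values, you could freeze them in a single operation and augment whole batches of training images on the fly. The snippet below is only an illustrative sketch; it is not part of the original tutorial, and the parameter values are hypothetical ones picked with the tool above.

    using Augmentor

    # hypothetical parameter values chosen interactively
    op = ElasticDistortion(10, 10,   # grid width & height
                           sigma = 3,
                           scale = 0.3,
                           iter = 3,
                           border = true)

    # augment the first 100 training images in one call
    batch = train_images[:, :, 1:100]
    out   = similar(batch)
    augmentbatch!(out, batch, op)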

    References

    [MNIST1998]

    LeCun, Yann, Corinna Cortes, and Christopher J.C. Burges. "The MNIST database of handwritten digits." Website. 1998.

    [SIMARD2003]

    Simard, Patrice Y., David Steinkraus, and John C. Platt. "Best practices for convolutional neural networks applied to visual document analysis." ICDAR. Vol. 3. 2003.

    diff --git a/generated/mnist_knet.ipynb b/generated/mnist_knet.ipynb deleted file mode 100644 index fa5fb9d4..00000000 --- a/generated/mnist_knet.ipynb +++ /dev/null @@ -1,1265 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# MNIST: Knet.jl CNN" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this tutorial we will adapt the\n", - "[MNIST example](http://denizyuret.github.io/Knet.jl/latest/tutorial.html#Convolutional-neural-network-1)\n", - "from [Knet.jl](https://github.com/denizyuret/Knet.jl)\n", - "to utilize a custom augmentation pipeline.\n", - "In order to showcase the effect that image augmentation can\n", - "have on a neural network's ability to generalize, we will\n", - "limit the training set to just the first 500 images (of the\n", - "available 60,000!). For more information on the dataset see\n", - "[MNIST1998]." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Preparing the MNIST dataset" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In order to access, prepare, and visualize the MNIST images we\n", - "employ the help of three additional Julia packages. In the\n", - "interest of time and space we will not go into great detail\n", - "about their functionality. Feel free to click on their\n", - "respective names to find out more information about the\n", - "utility they can provide.\n", - "\n", - "- [MLDatasets.jl](https://github.com/JuliaML/MLDatasets.jl)\n", - " has an MNIST submodule that offers a convenience interface\n", - " to read the MNIST database.\n", - "\n", - "- [Images.jl](https://github.com/JuliaImages/Images.jl) will\n", - " provide us with the necessary tools to process and display\n", - " the image data in Julia / Juypter.\n", - "\n", - "- [MLDataUtils.jl](https://github.com/JuliaML/MLDataUtils.jl)\n", - " implements a variety of functions to convert and partition\n", - " Machine Learning datasets. This will help us prepare the\n", - " MNIST data to be used with Knet.jl." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "using Images, MLDatasets, MLDataUtils\n", - "srand(42);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As you may have seen previously in the\n", - "[elastic distortions tutorial](@ref elastic), the function\n", - "`MNIST.traintensor` returns the MNIST training images\n", - "corresponding to the given indices as a multi-dimensional\n", - "array. These images are stored in the native horizontal-major\n", - "memory layout as a single array. Because we specify that\n", - "the `eltype` of that array should be `Float32`, all the\n", - "individual values are scaled to be between `0.0` and `1.0`.\n", - "Also note, how the observations are laid out along the last\n", - "array dimension" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(MNIST.traintensor(Float32, 1:500)) = \"28×28×500 Array{Float32,3}\"\n" - ] - } - ], - "source": [ - "@show summary(MNIST.traintensor(Float32, 1:500));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The corresponding label of each image is stored as an integer\n", - "value between `0` and `9`. That means that if the label has\n", - "the value `3`, then the corresponding image is known to be a\n", - "handwritten \"3\". 
To show a more concrete example, the\n", - "following code reveals that the first training image denotes a\n", - "\"5\" and the second training image a \"0\" (etc)." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(MNIST.trainlabels(1:500)) = \"500-element Array{Int64,1}\"\n", - "First eight labels: 5, 0, 4, 1, 9, 2, 1, 3\n" - ] - } - ], - "source": [ - "@show summary(MNIST.trainlabels(1:500))\n", - "println(\"First eight labels: \", join(MNIST.trainlabels(1:8),\", \"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For Knet we will require a slightly format for the images\n", - "and also the labels. More specifically, we add an additional\n", - "singleton dimension of length 1 to our image array. Think of\n", - "this as our single color channel (because MNIST images are gray).\n", - "Additionally we will convert our labels to proper 1-based indices.\n", - "This is because some functions provided by Knet expect the labels\n", - "to be in this format. We will do all this by creating a little\n", - "utility function that we will name `prepare_mnist`." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "prepare_mnist" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "\"\"\"\n", - " prepare_mnist(images, labels) -> (X, Y)\n", - "\n", - "Change the dimension layout x1×x2×N of the given array\n", - "`images` to x1×x2×1×N and return the result as `X`.\n", - "The given integer vector `labels` is transformed into\n", - "an integer vector denoting 1-based class indices.\n", - "\"\"\"\n", - "function prepare_mnist(images, labels)\n", - " X = reshape(images, (28, 28, 1, :))\n", - " Y = convertlabel(LabelEnc.Indices{Int8}, labels, 0:9)\n", - " X, Y\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With `prepare_mnist` defined, we can now use it in conjunction\n", - "with the functions in the `MLDatasets.MNIST` sub-module to load\n", - "and prepare our training set. Recall that for this tutorial only\n", - "the first 500 images of the training set will be used." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(train_x) = \"28×28×1×500 Array{Float32,4}\"\n", - "summary(train_y) = \"500-element Array{Int8,1}\"\n" - ] - }, - { - "data": { - "text/html": [ - "
    (a vector displayed as a row to save space)
    " - ], - "text/plain": [ - "8-element Array{Array{ColorTypes.Gray{Float32},2},1}:\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "train_x, train_y = prepare_mnist(MNIST.traintensor(Float32, 1:500), MNIST.trainlabels(1:500))\n", - "@show summary(train_x) summary(train_y);\n", - "[MNIST.convert2image(train_x[:,:,1,i]) for i in 1:8]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Similarly, we use `MNIST.testtensor` and `MNIST.testlabels`\n", - "to load the full MNIST test set. We will utilize that data to\n", - "measure how well the network is able to generalize with and\n", - "without augmentation." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(test_x) = \"28×28×1×10000 Array{Float32,4}\"\n", - "summary(test_y) = \"10000-element Array{Int8,1}\"\n" - ] - }, - { - "data": { - "text/html": [ - "
    (a vector displayed as a row to save space)
    " - ], - "text/plain": [ - "8-element Array{Array{ColorTypes.Gray{Float32},2},1}:\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "test_x, test_y = prepare_mnist(MNIST.testtensor(Float32), MNIST.testlabels())\n", - "@show summary(test_x) summary(test_y);\n", - "[MNIST.convert2image(test_x[:,:,1,i]) for i in 1:8]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining the Network" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With the dataset prepared, we can now define and instantiate\n", - "our neural network. 
To keep things simple, we will use the same\n", - "convolutional network as defined in the\n", - "[MNIST example](http://denizyuret.github.io/Knet.jl/latest/tutorial.html#Convolutional-neural-network-1)\n", - "of the Knet.jl package." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "using Knet" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The first thing we will do is define the forward pass through\n", - "the network. This will effectively outline the computation\n", - "graph of the network architecture. Note how this does not\n", - "define some details, such as the number of neurons per layer.\n", - "We will define those later when initializing our\n", - "vector of weight arrays `w`." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "forward" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "\"\"\"\n", - " forward(w, x) -> a\n", - "\n", - "Compute the forward pass for the given minibatch `x` by using the\n", - "neural network parameters in `w`. The resulting (unnormalized)\n", - "activations of the last layer are returned as `a`.\n", - "\"\"\"\n", - "function forward(w, x)\n", - " # conv1 (2x2 maxpool)\n", - " a1 = pool(relu.(conv4(w[1], x) .+ w[2]))\n", - " # conv2 (2x2 maxpool)\n", - " a2 = pool(relu.(conv4(w[3], a1) .+ w[4]))\n", - " # dense1 (relu)\n", - " a3 = relu.(w[5] * mat(a2) .+ w[6])\n", - " # dense2 (identity)\n", - " a4 = w[7] * a3 .+ w[8]\n", - " return a4\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In order to be able to train our network we need to choose a\n", - "cost function. Because this is a classification problem we will\n", - "use the negative log-likelihood (provided by `Knet.nll`).\n", - "With the cost function defined we can the simply use the\n", - "higher-order function `grad` to create a new function `costgrad`\n", - "that computes us the corresponding gradients." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(::gradfun) (generic function with 1 method)" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "\"\"\"\n", - " cost(w, x, y) -> AbstractFloat\n", - "\n", - "Compute the per-instance negative log-likelihood for the data\n", - "in the minibatch `(x, y)` given the network with the current\n", - "parameters in `w`.\n", - "\"\"\"\n", - "cost(w, x, y) = nll(forward(w, x), y)\n", - "costgrad = grad(cost)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Aside from the cost function that we need for training, we\n", - "would also like a more interpretable performance measurement.\n", - "In this tutorial we will use \"accuracy\" for its simplicity\n", - "and because we know that the class distribution for MNIST\n", - "is close to uniform." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "acc" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "\"\"\"\n", - " acc(w, X, Y; [batchsize]) -> Float64\n", - "\n", - "Compute the accuracy for the data in `(X,Y)` given the network\n", - "with the current parameters in `w`. 
The resulting value is\n", - "computed by iterating over the data in minibatches of size\n", - "`batchsize`.\n", - "\"\"\"\n", - "function acc(w, X, Y; batchsize = 100)\n", - " sum = 0; count = 0\n", - " for (x_cpu, y) in eachbatch((X, Y), maxsize = batchsize)\n", - " x = KnetArray{Float32}(x_cpu)\n", - " sum += Int(accuracy(forward(w,x), y, average = false))\n", - " count += length(y)\n", - " end\n", - " return sum / count\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Before we can train or even just use our network, we need to\n", - "define how we initialize `w`, which is our the vector of\n", - "parameter arrays. The dimensions of these individual arrays\n", - "specify the filter sizes and number of neurons.\n", - "It can be helpful to compare the indices here with the indices\n", - "used in our `forward` function to see which array corresponds\n", - "to which computation node of our network." - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "weights (generic function with 2 methods)" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function weights(atype = KnetArray{Float32})\n", - " w = Array{Any}(8)\n", - " # conv1\n", - " w[1] = xavier(5,5,1,20)\n", - " w[2] = zeros(1,1,20,1)\n", - " # conv2\n", - " w[3] = xavier(5,5,20,50)\n", - " w[4] = zeros(1,1,50,1)\n", - " # dense1\n", - " w[5] = xavier(500,800)\n", - " w[6] = zeros(500,1)\n", - " # dense2\n", - " w[7] = xavier(10,500)\n", - " w[8] = zeros(10,1)\n", - " return map(a->convert(atype,a), w)\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Training without Augmentation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In order to get an intuition for how useful augmentation can\n", - "be, we need a sensible baseline to compare to. To that end, we\n", - "will first train the network we just defined using only the\n", - "(unaltered) 500 training examples." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The package\n", - "[ValueHistories.jl](https://github.com/JuliaML/ValueHistories.jl)\n", - "will help us record the accuracy during the training process.\n", - "We will use those logs later to visualize the differences\n", - "between having augmentation or no augmentation." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "using ValueHistories" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To keep things simple, we will not overly optimize our\n", - "training function. Thus, we will be content with using a\n", - "closure. Because both, the baseline and the augmented version,\n", - "will share this \"inefficiency\", we should still get a decent\n", - "enough picture of their performance differences." 
- ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "train_baseline (generic function with 1 method)" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function train_baseline(; epochs = 500, batchsize = 100, lr = .03)\n", - " w = weights()\n", - " log = MVHistory()\n", - " for epoch in 1:epochs\n", - " for (batch_x_cpu, batch_y) in eachbatch((train_x ,train_y), batchsize)\n", - " batch_x = KnetArray{Float32}(batch_x_cpu)\n", - " g = costgrad(w, batch_x, batch_y)\n", - " Knet.update!(w, g, lr = lr)\n", - " end\n", - "\n", - " if (epoch % 5) == 0\n", - " train = acc(w, train_x, train_y)\n", - " test = acc(w, test_x, test_y)\n", - " @trace log epoch train test\n", - " msg = \"epoch \" * lpad(epoch,4) * \": train accuracy \" * rpad(round(train,3),5,\"0\") * \", test accuracy \" * rpad(round(test,3),5,\"0\")\n", - " println(msg)\n", - " end\n", - " end\n", - " log\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Aside from the accuracy, we will also keep an eye on the\n", - "training time. In particular we would like to see if and how\n", - "the addition of augmentation causes our training time to\n", - "increase." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 5: train accuracy 0.550, test accuracy 0.460\n", - "epoch 10: train accuracy 0.694, test accuracy 0.592\n", - "epoch 15: train accuracy 0.820, test accuracy 0.749\n", - "epoch 20: train accuracy 0.862, test accuracy 0.781\n", - "epoch 25: train accuracy 0.888, test accuracy 0.815\n", - "epoch 30: train accuracy 0.896, test accuracy 0.851\n", - "epoch 35: train accuracy 0.920, test accuracy 0.867\n", - "epoch 40: train accuracy 0.930, test accuracy 0.876\n", - "epoch 45: train accuracy 0.938, test accuracy 0.882\n", - "epoch 50: train accuracy 0.954, test accuracy 0.885\n", - "epoch 55: train accuracy 0.964, test accuracy 0.888\n", - "epoch 60: train accuracy 0.968, test accuracy 0.891\n", - "epoch 65: train accuracy 0.972, test accuracy 0.893\n", - "epoch 70: train accuracy 0.978, test accuracy 0.894\n", - "epoch 75: train accuracy 0.982, test accuracy 0.896\n", - "epoch 80: train accuracy 0.988, test accuracy 0.898\n", - "epoch 85: train accuracy 0.994, test accuracy 0.899\n", - "epoch 90: train accuracy 0.996, test accuracy 0.899\n", - "epoch 95: train accuracy 0.998, test accuracy 0.901\n", - "epoch 100: train accuracy 1.000, test accuracy 0.901\n", - "epoch 105: train accuracy 1.000, test accuracy 0.902\n", - "epoch 110: train accuracy 1.000, test accuracy 0.902\n", - "epoch 115: train accuracy 1.000, test accuracy 0.902\n", - "epoch 120: train accuracy 1.000, test accuracy 0.903\n", - "epoch 125: train accuracy 1.000, test accuracy 0.903\n", - "epoch 130: train accuracy 1.000, test accuracy 0.903\n", - "epoch 135: train accuracy 1.000, test accuracy 0.904\n", - "epoch 140: train accuracy 1.000, test accuracy 0.903\n", - "epoch 145: train accuracy 1.000, test accuracy 0.903\n", - "epoch 150: train accuracy 1.000, test accuracy 0.903\n", - "epoch 155: train accuracy 1.000, test accuracy 0.903\n", - "epoch 160: train accuracy 1.000, test accuracy 0.903\n", - "epoch 165: train accuracy 1.000, test accuracy 0.903\n", - "epoch 170: train accuracy 1.000, test accuracy 0.903\n", - "epoch 175: train accuracy 1.000, test accuracy 0.903\n", - "epoch 
180: train accuracy 1.000, test accuracy 0.903\n", - "epoch 185: train accuracy 1.000, test accuracy 0.903\n", - "epoch 190: train accuracy 1.000, test accuracy 0.903\n", - "epoch 195: train accuracy 1.000, test accuracy 0.902\n", - "epoch 200: train accuracy 1.000, test accuracy 0.903\n", - " 7.073396 seconds (3.21 M allocations: 277.991 MiB, 1.06% gc time)\n" - ] - } - ], - "source": [ - "train_baseline(epochs=1) # warm-up\n", - "baseline_log = @time train_baseline(epochs=200);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, the accuracy on the training set is around a\n", - "100%, while the accuracy on the test set peaks around 90%. For\n", - "a mere 500 training examples, this isn't actually that bad of\n", - "a result." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Integrating Augmentor" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have a network architecture with a baseline to\n", - "compare to, let us finally see what it takes to add Augmentor\n", - "to our experiment. First, we need to include the package to\n", - "our experiment." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "using Augmentor" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The next step, and maybe the most human-hour consuming part of\n", - "adding image augmentation to a prediction problem, is to\n", - "design and select a sensible augmentation pipeline. Take a\n", - "look at the [elastic distortions tutorial](@ref elastic) for\n", - "an example of how to do just that." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For this example, we already choose a quite complicated but\n", - "promising augmentation pipeline for you. This pipeline was\n", - "designed to yield a large variation of effects as well as to\n", - "showcase how even deep pipelines are quite efficient in terms\n", - "of performance." - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "10-step Augmentor.ImmutablePipeline:\n", - " 1.) Reshape array to 28×28\n", - " 2.) Permute dimension order to (2, 1)\n", - " 3.) Either: (50%) ShearX by ϕ ∈ -5:5 degree. (50%) ShearY by ψ ∈ -5:5 degree.\n", - " 4.) Rotate by θ ∈ -15:15 degree\n", - " 5.) Crop a 28×28 window around the center\n", - " 6.) Zoom by I ∈ {0.9×0.9, 1.0×1.0, 1.1×1.1, 1.2×1.2}\n", - " 7.) Cache into temporary buffer\n", - " 8.) Distort using a smoothed and normalized 10×10 grid with pinned border\n", - " 9.) Permute dimension order to (2, 1)\n", - " 10.) Reshape array to 28×28×1" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "pl = Reshape(28,28) |>\n", - " PermuteDims(2,1) |>\n", - " ShearX(-5:5) * ShearY(-5:5) |>\n", - " Rotate(-15:15) |>\n", - " CropSize(28,28) |>\n", - " Zoom(0.9:0.1:1.2) |>\n", - " CacheImage() |>\n", - " ElasticDistortion(10) |>\n", - " PermuteDims(2,1) |>\n", - " Reshape(28,28,1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Most of the used operations are quite self explanatory, but\n", - "there are some details about this pipeline worth pointing out\n", - "explicitly." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "1. 
We use the operation [`PermuteDims`](@ref) to convert the\n", - " horizontal-major MNIST image to a julia-native\n", - " vertical-major image. The vertical-major image is then\n", - " processed and converted back to a horizontal-major array.\n", - " We mainly do this here to showcase the option, but it is\n", - " also to keep consistent with how the data is usually used\n", - " in the literature. Alternatively, one could just work with\n", - " the MNIST data in a vertical-major format all the way\n", - " through without any issue." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "2. As counter-intuitive as it sounds, the operation\n", - " [`CacheImage`](@ref) right before\n", - " [`ElasticDistortion`](@ref) is actually used to improve\n", - " performance. If we were to omit it, then the whole pipeline\n", - " would be applied in one single pass. In this case, applying\n", - " distortions on top of affine transformations lazily is in\n", - " fact less efficient than using a temporary variable." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With the pipeline now defined, let us quickly peek at what\n", - "kind of effects we can achieve with it. In particular, lets\n", - "apply the pipeline multiple times to the first training image\n", - "and look at what kind of results it produces." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
    " - ], - "text/plain": [ - "2×8 Array{Array{ColorTypes.Gray{Float32},2},2}:\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] … ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "[MNIST.convert2image(reshape(augment(train_x[:,:,:,1], pl), (28, 28))) for i in 1:2, j in 1:8]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, we can achieve a wide range of effects, from\n", - "more subtle to more pronounced. The important part is that all\n", - "examples are still clearly representative of the true label." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we have to adapt the function `train_baseline` to make\n", - "use of our augmentation pipeline. To integrate Augmentor\n", - "efficiently, there are three necessary changes we have to\n", - "make." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "1. Preallocate a buffer with the same size and element type\n", - " that each batch has.\n", - "\n", - " ```\n", - " batch_x_aug = zeros(Float32, 28, 28, 1, batchsize)\n", - " ```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "2. Add a call to [`augmentbatch!`](@ref) in the inner loop of\n", - " the batch iterator using our pipeline and buffer.\n", - "\n", - " ```\n", - " augmentbatch!(batch_x_aug, batch_x_org, pl)\n", - " ```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "3. Replace `batch_x_org` with `batch_x_aug` in the constructor\n", - " of `KnetArray`.\n", - "\n", - " ```\n", - " batch_x = KnetArray{Float32}(batch_x_aug)\n", - " ```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Applying these changes to our `train_baseline` function\n", - "will give us something similar to the following function.\n", - "Note how all the other parts of the function remain exactly\n", - "the same as before." 
- ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "train_augmented (generic function with 1 method)" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function train_augmented(; epochs = 500, batchsize = 100, lr = .03)\n", - " w = weights()\n", - " log = MVHistory()\n", - " batch_x_aug = zeros(Float32, size(train_x,1), size(train_x,2), 1, batchsize)\n", - " for epoch in 1:epochs\n", - " for (batch_x_cpu, batch_y) in eachbatch((train_x ,train_y), batchsize)\n", - " augmentbatch!(CPUThreads(), batch_x_aug, batch_x_cpu, pl)\n", - " batch_x = KnetArray{Float32}(batch_x_aug)\n", - " g = costgrad(w, batch_x, batch_y)\n", - " Knet.update!(w, g, lr = lr)\n", - " end\n", - "\n", - " if (epoch % 5) == 0\n", - " train = acc(w, train_x, train_y)\n", - " test = acc(w, test_x, test_y)\n", - " @trace log epoch train test\n", - " msg = \"epoch \" * lpad(epoch,4) * \": train accuracy \" * rpad(round(train,3),5,\"0\") * \", test accuracy \" * rpad(round(test,3),5,\"0\")\n", - " println(msg)\n", - " end\n", - " end\n", - " log\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You may have noticed in the code above that we also pass a\n", - "`CPUThreads()` as the first argument to [`augmentbatch!`](@ref).\n", - "This instructs Augmentor to process the images of the batch in\n", - "parallel using multi-threading. For this to work properly you\n", - "will need to set the environment variable `JULIA_NUM_THREADS`\n", - "to the number of threads you wish to use. You can check how\n", - "many threads are used with the function `Threads.nthreads()`" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Threads.nthreads() = 10\n" - ] - } - ], - "source": [ - "@show Threads.nthreads();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that all pieces are in place, let us train our network\n", - "once more. We will use the same parameters except that now\n", - "instead of the original training images we will be using\n", - "randomly augmented images. This will cause every epoch to be\n", - "different." 
- ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 5: train accuracy 0.520, test accuracy 0.462\n", - "epoch 10: train accuracy 0.658, test accuracy 0.564\n", - "epoch 15: train accuracy 0.732, test accuracy 0.678\n", - "epoch 20: train accuracy 0.786, test accuracy 0.728\n", - "epoch 25: train accuracy 0.828, test accuracy 0.778\n", - "epoch 30: train accuracy 0.884, test accuracy 0.846\n", - "epoch 35: train accuracy 0.870, test accuracy 0.822\n", - "epoch 40: train accuracy 0.916, test accuracy 0.873\n", - "epoch 45: train accuracy 0.912, test accuracy 0.866\n", - "epoch 50: train accuracy 0.914, test accuracy 0.876\n", - "epoch 55: train accuracy 0.936, test accuracy 0.887\n", - "epoch 60: train accuracy 0.944, test accuracy 0.910\n", - "epoch 65: train accuracy 0.940, test accuracy 0.901\n", - "epoch 70: train accuracy 0.932, test accuracy 0.888\n", - "epoch 75: train accuracy 0.952, test accuracy 0.910\n", - "epoch 80: train accuracy 0.952, test accuracy 0.905\n", - "epoch 85: train accuracy 0.968, test accuracy 0.929\n", - "epoch 90: train accuracy 0.972, test accuracy 0.927\n", - "epoch 95: train accuracy 0.968, test accuracy 0.922\n", - "epoch 100: train accuracy 0.972, test accuracy 0.933\n", - "epoch 105: train accuracy 0.980, test accuracy 0.934\n", - "epoch 110: train accuracy 0.978, test accuracy 0.924\n", - "epoch 115: train accuracy 0.976, test accuracy 0.919\n", - "epoch 120: train accuracy 0.986, test accuracy 0.939\n", - "epoch 125: train accuracy 0.982, test accuracy 0.934\n", - "epoch 130: train accuracy 0.988, test accuracy 0.933\n", - "epoch 135: train accuracy 0.992, test accuracy 0.935\n", - "epoch 140: train accuracy 0.994, test accuracy 0.942\n", - "epoch 145: train accuracy 0.994, test accuracy 0.946\n", - "epoch 150: train accuracy 0.994, test accuracy 0.946\n", - "epoch 155: train accuracy 0.994, test accuracy 0.945\n", - "epoch 160: train accuracy 0.992, test accuracy 0.943\n", - "epoch 165: train accuracy 0.998, test accuracy 0.946\n", - "epoch 170: train accuracy 0.998, test accuracy 0.945\n", - "epoch 175: train accuracy 0.998, test accuracy 0.946\n", - "epoch 180: train accuracy 0.998, test accuracy 0.950\n", - "epoch 185: train accuracy 0.996, test accuracy 0.952\n", - "epoch 190: train accuracy 0.998, test accuracy 0.947\n", - "epoch 195: train accuracy 0.996, test accuracy 0.952\n", - "epoch 200: train accuracy 0.998, test accuracy 0.950\n", - " 24.805563 seconds (39.13 M allocations: 21.675 GiB, 9.24% gc time)\n" - ] - } - ], - "source": [ - "train_augmented(epochs=1) # warm-up\n", - "augmented_log = @time train_augmented(epochs=200);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, our network reaches far better results on our\n", - "testset than our baseline network did. However, we can also\n", - "see that the training took quite a bit longer than before.\n", - "This difference generally decreases as the complexity of the\n", - "utilized neural network increases. Yet another way to improve\n", - "performance (aside from simplifying the augmentation pipeline)\n", - "would be to increase the number of available threads." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Improving Performance" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "One of the most effective ways to make the most out of the\n", - "available resources is to augment the next (couple) mini-batch\n", - "while the current minibatch is being processed on the GPU.\n", - "We can do this via julia's build in parallel computing\n", - "capabilities" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First we need a worker process that will be responsible for\n", - "augmenting our dataset each epoch. This worker also needs\n", - "access to a couple of our packages" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [], - "source": [ - "# addprocs(1)\n", - "# @everywhere using Augmentor, MLDataUtils" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we replace the inner `eachbatch` loop with a more\n", - "complicated version using a `RemoteChannel` to exchange and\n", - "queue the augmented data." - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "async_train_augmented (generic function with 1 method)" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function async_train_augmented(; epochs = 500, batchsize = 100, lr = .03)\n", - " w = weights()\n", - " log = MVHistory()\n", - " for epoch in 1:epochs\n", - " @sync begin\n", - " local_ch = Channel{Tuple}(4) # prepare up to 4 minibatches in adavnce\n", - " remote_ch = RemoteChannel(()->local_ch)\n", - " @spawn begin\n", - " # This block is executed on the worker process\n", - " batch_x_aug = zeros(Float32, size(train_x,1), size(train_x,2), 1, batchsize)\n", - " for (batch_x_cpu, batch_y) in eachbatch((train_x ,train_y), batchsize)\n", - " # we are still using multithreading\n", - " augmentbatch!(CPUThreads(), batch_x_aug, batch_x_cpu, pl)\n", - " put!(remote_ch, (batch_x_aug, batch_y))\n", - " end\n", - " close(remote_ch)\n", - " end\n", - " @async begin\n", - " # This block is executed on the main process\n", - " for (batch_x_aug, batch_y) in local_ch\n", - " batch_x = KnetArray{Float32}(batch_x_aug)\n", - " g = costgrad(w, batch_x, batch_y)\n", - " Knet.update!(w, g, lr = lr)\n", - " end\n", - " end\n", - " end\n", - "\n", - " if (epoch % 5) == 0\n", - " train = acc(w, train_x, train_y)\n", - " test = acc(w, test_x, test_y)\n", - " @trace log epoch train test\n", - " msg = \"epoch \" * lpad(epoch,4) * \": train accuracy \" * rpad(round(train,3),5,\"0\") * \", test accuracy \" * rpad(round(test,3),5,\"0\")\n", - " println(msg)\n", - " end\n", - " end\n", - " log\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that for this toy example the overhead of this approach\n", - "is greater than the benefit." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Visualizing the Results" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Before we end this tutorial, let us make use the\n", - "[Plots.jl](https://github.com/JuliaPlots/Plots.jl) package to\n", - "visualize and discuss the recorded training curves.\n", - "We will plot the accuracy curves of both networks side by side\n", - "in order to get a good feeling about their differences." 
- ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Plots.PyPlotBackend()" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "using Plots\n", - "pyplot()" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA4QAAAGQCAYAAAD2lq6fAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzs3XlcVFX/wPHPnRlgBBGURcEFEBUtczdzwSXXNk0je36p1aMpaKa54VKuleUGaqWikpqVLe7Lo1mWa2gJmktqiYKmqKyDLAPMzPn9MTJKgBvLgJz368WL4c65537vHGbu/c4591xFCCGQJEmSJEmSJEmSKhyVtQOQJEmSJEmSJEmSrEMmhJJUwXTu3BlFUfIs27t3L4qiMGPGDOsEJUmSJElSgVavXo2iKKxevdraoUiPKJkQStJDiImJQVGUfD8ODg40adKEmTNnkpaWZu0wJUmSJOm+DB48GEVRcHFxISsry9rhlHm55wFvvPGGtUORpCLTWDsASSrPfH19GThwIABCCOLj49m5cyczZsxg165dHDx4ELVabeUo7+3JJ5/kzJkzuLq6WjsUSZIkqZTdvHmT7777DkVRSEpKYvPmzbzyyivWDkuSpFIiE0JJKoJ69erlG2aZlZVF27ZtOXz4MPv27ePpp5+2TnAPwN7enoYNG1o7DEmSJMkKvv32W9LT0xk7diwLFy4kPDxcJoSSVIHIIaOSVMzs7Ozo0qULAAkJCZblv/zyC4MHD8bPz4/KlStTuXJlWrVqxfLlywusJyoqioCAAOrUqYOdnR1ubm60bt2aDz/8MF/ZGzduMGbMGOrVq4ednR2urq689NJLnDp16r5iLuwaQm9vb7y9vUlLS2P06NF4enpiZ2dHkyZNWL9+fYF1ZWdnExISQosWLXBwcMDR0RF/f3+2bt16X7FIkiRJpSs8PByNRkNwcDBdunRhz549xMbG5iunKAqdO3cusI7c48W/xcTE8Morr1CtWjUqV65Mp06d2L9/PzNmzEBRFPbu3Wspe+ex6Ndff6VLly44Ojri5ubGiBEjyMzMBGDHjh20bdsWBwcHqlevTnBwMAaDocC4tmzZQteuXalatSparZbGjRszf/58jEZjnnJ3Xqe3e/du2rVrh729PS4uLrz++uskJibmKevj4wPAmjVr8lw6cuf+CCH4/PPPad++PVWqVMHe3p5WrVrx+eefFxhrUlISQUFBVK9eHXt7e1q3bs2mTZsKLCtJxUkmhJJUzLKzsy0HtWbNmlmWz5kzh/3799O6dWtGjhzJwIEDSUhIIDAwkHHjxuWp4/jx47Rr146dO3fSoUMHxo4dS0BAAPb29vkSyOjoaFq2bMnChQvx9fXl7bff5tlnn2XXrl089dRTHDlypEj7k5OTQ48ePdi9ezcvvfQSAwcOJDo6mv79+7N79+48ZbOysujZsyfjxo1DCMGQIUMYOHAgsbGx9OnTh08//bRIsUiSJEnF688//+Tw4cP06NGD6tWr89prr2EymVi1alWR675y5Qrt2rXju+++o02bNowaNQpXV1e6d+9+12PTkSNH6Nq1K05OTgQGBlKnTh2WLl3K0KFD+fbbbwkICMDLy4vAwECcnZ2ZN28es2fPzlfP5MmTefHFFzl37hz9+vVjxIgRVKpUiQkTJvCf//ynwG1v3bqVF154AU9PT0aMGIGvry9ffPEFffr0sZRp1qwZo0ePBqBp06ZMnz7d8pObFAshGDBgAEOGDCE+Pp5XX32VN998k/T0dIYMGcL48ePzbDcjI4POnTsTFhaGr68vo0ePxs/Pj1deeaXQL2AlqdgISZIe2MWLFwUgfH19xfTp08X06dPFtGnTxIgRI4Svr6/QarVi3rx5eda5cOFCvnpycnJE9+7dhVqtFrGxsZblY8eOFYDYvHlzvnUSEhLy/N2uXTuhVqvFrl278iw/d+6ccHR0FE888USe5Z06dRL/fuv/8ssvAhDTp0/Ps9zLy0sAok+fPiIrK8uy/KeffhKA6NmzZ57yU6ZMEYCYOnWqMJlMluWpqamiVatWwtbWVly5ciXfPkmSJEnWkXu8WbdunRBCiJs3bwoHBwdRp04dYTQa85QFRKdOnQqsx8vLS3h5eeVZNnDgQAGIDz/8MM/y8PBwAQhA/PLLL5bluceifx//srOzRZMmTYSiKMLV1VX89ttvludSU1OFu7u7qFatmsjOzrYs3717t+U4lZaWZlluMplEUFCQAMT69esty1etWiUAodFoxMGDBy3LDQaD6Ny5swBERESEZXnuecDrr79e4OuxfPlyAYj//ve/eeLKysoSL7zwggDE0aNHLcunT58uADF06NA89ezatcvymqxatarAbUlSUcmEUJIeQu6BoLCf559/Xhw7duy+6tqwYYMAxOrVqy3Lcg/QP/zww13XjYqKEoAYPHhwgc/n1nPy5EnLsodJCAtKZr28vES1atUsfxuNRlG1alXh6+ubJxnMtXXrVgGITz755K77JEmSJJWO7Oxs4ebmJqpUqSIyMzMty3MTuX8fgx4kIdTr9cLOzk64u7sLvV6fp6zJZBJ+fn6FJoRdunTJV/+sWbMsCda/DR48ON+xqnfv3gLI82VrrpSUFKEoinjppZcsy3ITwtdeey1f+dznFi9ebFl2r4SwSZMmwsHBQWRkZOR77sSJEwIQ48aNsyzz8fERtra2Ii4uLl/5rl27yoRQKlFyUhlJKoKePXuya9cuy9+JiYkcOnSI0aNH0759e37++WfatGkDmGdxmz9/Pps3byY6Opr09PQ8dV29etXyuH///ixcuJC+ffvyyiuv0L17dzp27EjNmjXzrHP48GEArl+/XuA9BM+ePWv53bhx44faR2dnZ8u1EneqVasWERERlr/PnTtHcnIynp6ezJw5M1/5+Pj4PDFJkiRJ1rVlyxbi4+MZMmQIWq3Wsvy1117jyy+/JDw8nB49ejxU3efOnSMrK4tWrVphZ2eX5zlFUWjXrh3nzp0rcN07L7fI5eHhcc/nrl69ajleHT58GAcHh0Kv16tUqVKBx6OWLVvmW1arVi0AUlJSCqzr3zIyMjh58iSenp7MmTMn3/M5OTnA7eNhamoqFy9e5LHHHq
NGjRr5yvv7+7Nnz5772rYkPQyZEEpSMXJxcaF3797Y29vTvXt33nvvPX788Ueys7Pp3LkzUVFRNG/enEGDBuHi4oJGoyEmJoY1a9bkue9TmzZt2Lt3L7Nnz+brr7+2XMvRunVr5syZY5m0JikpCTBfYL9jx45C4/p38vkgnJycClyu0WgwmUyWv3NjOX36NKdPny6RWCRJkqTiEx4eDpgTwDt17dqVmjVrsmXLFpKSkqhWrdoD152amgqAu7t7gc9Xr1690HWrVKmSb5lGo7nnc7mJFpiPSQaDocAvKHMVdDy6W/3/noimMMnJyQghuHLlyn1tvyivlSQVB5kQSlIJyO0V/P333wHzt7BRUVEMGTKElStX5in7zTffsGbNmnx1+Pv7s3PnTjIzMzly5Ajbtm1jyZIlPPfcc5w6dYq6detaDlyffPIJI0eOLOG9urvcWF566SV5AbwkSVIZd/nyZcvEYJ06dSq03JdffsmoUaMAc89eYbN56nS6PF8g5h4Tbty4UWD569evP1Tc96tKlSooipJntu/SkrvvLVu25OjRo/dd3lqvlSTJhFCSSkBycjKApQctOjoaIM8sZbkOHDhw17oqVapE586d6dy5M87OzkybNo0ff/yRwMBAS+IZERFh9YSwUaNGVKlShaNHj5KTk4ONjY1V45EkSZIKt3r1akwmEx06dMDPzy/f8waDgTVr1hAeHm5JCKtWrcqVK1fylY2JiSElJSVPQujn54ednR2RkZFkZWXlGTYqhMhzyUFJaNOmDTt37uTvv/+mfv36xV6/Wq0GCu41dHR0pFGjRpw5c4aUlBScnZ3vWleVKlXw8fHh/PnzXLt2Ld+w0XudJ0hSUcnbTkhSCQgJCQGgY8eOAHh5eQFw8ODBPOX27dvHihUr8q0fERGBXq/Ptzz3W8Lcaz2efPJJ2rRpw7p16/j222/zlTeZTOzbt68Ie3L/NBoNw4cPJzY2lvHjx+cZupPr1KlThX4DKkmSJJUOIQSrVq1CURTWrFnDypUr8/2sXr2atm3bcuLECUsvV+vWrYmJiclzXMnOzmbs2LH5tmFnZ0dAQADXr19n4cKFeZ774osvSvx68twkdvDgwXnuIZjr2rVrnDlz5qHrr1q1KoqicPny5UK3n5GRwdChQwscmnrx4kViYmIsfw8aNIjs7GymTZuWp9zu3bvl9YNSiZM9hJJUBOfPn88zmUtSUhKHDh0iKiqKqlWrWi4mf+GFF/D29mbu3LmcOnWKxo0bc+7cObZv307fvn3zDbGcM2cOv/zyCx07dsTHxwetVktUVBR79uyhbt269O3b11J23bp1dOnShf/85z8sXLiQFi1aUKlSJS5dukRERATx8fEFJpclYebMmURFRbF48WJ27NhBx44dcXd358qVK5w8eZI//viDiIiIQq+TkCRJkkrezz//zMWLF+nUqRN169YttNx///tfIiIiCA8Pp1WrVowdO5bdu3fz7LPP8n//93/Y29vz448/4uzsbJnY5U4fffQRP/30E5MmTWLfvn00b97ccuzr1asXu3btQqUqmb6JXr16MXXqVN5//33q1atHr1698PLyIjExkfPnz3PgwAE++OADGjVq9FD1V65cmdatW7N//34GDRpE/fr1UalUDBo0yHKPxMOHD7NmzRoOHTpEt27d8PT05Pr165w9e5YjR47w9ddfW+5bGBwczMaNG1mxYgWnT5+mY8eOXL58me+++47nnnvurvMESFJRyYRQkoogOjo6zwXjdnZ21KpVi+HDhzNp0iTq1KkDmA8cP//8MxMmTGD//v3s3buXxx9/nK+++orq1avnSwiHDx+Ok5MTR44cYd++fQghqFOnDlOmTGHMmDF5Lnr38fHh2LFjhISEsHnzZlatWoVarcbDw4OOHTsSEBBQOi/Grf3fuXMn4eHhfPHFF2zYsIGsrCyqV6/OY489RlBQEE888USpxSNJkiTllzuZzBtvvHHXcq+88gqjR49m3bp1hISE0KNHD7777jtmzZrF2rVrqVatGi+//DKzZ88ucCbr2rVrExERwcSJE9m9ezf79u2jZcuW7N69m++//x4oeBKX4jJr1iw6duzI4sWL2bNnDykpKbi4uODj48OMGTMYMGBAkepfu3YtY8aMYfv27eh0OoQQdOjQAS8vLxRFYfXq1Tz77LOsWLGC7du3k5aWhru7O/Xr12f+/Pl069bNUpeDgwP79u1j8uTJbNq0iaioKB5//HG+/fZbdDqdTAilEqUIIYS1g5AkSZIkSZIqjg4dOhAREYFOp6Ny5crWDkeSKjR5DaEkSZIkSZJUIuLi4vIt+/LLLy3DKGUyKEnWJ3sIJUmSJEmSpBLh4uJC8+bNeeyxx1Cr1Rw/fpy9e/fi6OjIoUOH5GUEklQGyIRQkiRJkiRJKhHvvvsu27Zt49KlS6Snp+Pm5kaXLl2YOnUqDRs2tHZ4kiRRCkNGR40ahbe3N4qicPz48ULLhYeHU79+fXx9fRk6dGiBU9ZLkiRJUnkjj4NSRfbhhx9y4sQJUlJSyMnJ4erVq3z11VcyGZSkMqTEE8KAgAAOHjxouQ9bQS5evMjUqVM5cOAA58+f5/r16yxfvrykQ5MkSZKkEiePg5IkSVJZVuIJYceOHalVq9Zdy6xfv57evXtTo0YNFEUhKCiIdevWlXRokiRJklTi5HFQkiRJKsvKxH0IL126lOebU29vby5dulRgWSEECQkJ2NraoiiKZbmdnR12dnYlHqskSZIkFTd5HJQkSZKspUwkhA/i5s2buLu751seHBzMxIkTrRBRxZaamlqiN5WtqITRgEiJRyRdx5R8HZF0DVPyDUTSNURKPBgNAHRYvZ9YXSZeTvYcfPPpAuvqsPJnYnUZdy1jrXJym9Yv90hsM/xnYlMy8K5Th8hjxwotl6tatWr3LFOWyeNg2SKPg9Yn28D6SrsNhCEHsvWltj0Lu0oo6qKnT0KYICcbxVZbDEGZzxtvpmfcdxv8+zhYJhLCOnXqEB0dbfk7JiaGOnXq3HWdy5cvYzAYLDskvxm1nvJ+cmUtpmw9xoQ4DAlxGBKu3voxP169L5KwyAsEtvBhUPO6aFw9sHX1QNP4KTSuHmhcPdG4ejKxwSbmzpnLxMmTqRUUVOB2JtVbxvy5cxgfPLHQMtYq96hsc85HH8k2sPI2c9ugvH4eyeNg+VZe/+8eJbINrK802iAnLoa0Q9tZvnwly377y3ye1KR2oeXXnrhMWNTFu5a7nzK3y8UwvGMTBj/T5da5mAcaN080Lh5oXDxQbG9/BgujEWPyjXzneIaEOFbtiSDs9/MEtW3E4B7+aFw9UOfW5+qJxs0TlYOTZRSIEAJTeiqGhKu3zh3z1rkm4hTLzyYxcep0gu5yvCxMqd12wtvbm82bN9OsWbN8z124cIEOHToQFRVF9erV6dOnDz169GDkyJH5yqampuLk5IROp8tzIJSsIykpSbbBXZgy0jAkxmGIN79xV3yzgU+3/kRgm/oMrO9iKafYVTJ/mLh5onbxoMXwd7l49Tp1vWpz/sJFFJW60G3IN
plt = plot(
    plot(baseline_log, title="Baseline", ylim=(.5,1)),
    plot(augmented_log, title="Augmented", ylim=(.5,1)),
    size = (900, 400),
    xlab = "Epoch",
    ylab = "Accuracy",
    markersize = 1
)
plt

[figure: train/test accuracy per epoch, "Baseline" (left) vs. "Augmented" (right), x-axis "Epoch", y-axis "Accuracy" limited to 0.5–1.0]

Note how the accuracy on the (unaltered) training set increases faster for the baseline network than for the augmented one. This is to be expected, since our augmented network doesn't actually use the unaltered images for training, and thus has not actually seen them. Given this, it is worth pointing out explicitly that the accuracy on the training set is still greater than on the test set for the augmented network as well. This is also not a surprise, given that the augmented images are likely more similar to their originals than to the test images.

For the baseline network, the accuracy on the test set plateaus quite quickly (around 90%). For the augmented network, on the other hand, the accuracy keeps increasing for quite a while longer.

## References

**MNIST1998**: LeCun, Yann, Corinna Cortes, Christopher J.C. Burges. ["The MNIST database of handwritten digits"](http://yann.lecun.com/exdb/mnist/). Website. 1998.

diff --git a/generated/mnist_knet/index.html b/generated/mnist_knet/index.html deleted file mode 100644 index fda3ece7..00000000 --- a/generated/mnist_knet/index.html +++ /dev/null @@ -1,265 +0,0 @@
MNIST: Knet.jl CNN · Augmentor.jl

    MNIST: Knet.jl CNN

    In this tutorial we will adapt the MNIST example from Knet.jl to utilize a custom augmentation pipeline. In order to showcase the effect that image augmentation can have on a neural network's ability to generalize, we will limit the training set to just the first 500 images (of the available 60,000!). For more information on the dataset see [MNIST1998].

    Note

    This tutorial is also available as a Jupyter notebook. You can find a link to the Jupyter version of this tutorial in the top right corner of this page.

    Preparing the MNIST dataset

    In order to access, prepare, and visualize the MNIST images we employ the help of three additional Julia packages. In the interest of time and space we will not go into great detail about their functionality. Feel free to click on their respective names to find out more information about the utility they can provide.

    • MLDatasets.jl has an MNIST submodule that offers a convenience interface to read the MNIST database.

    • Images.jl will provide us with the necessary tools to process and display the image data in Julia / Jupyter.

    • MLDataUtils.jl implements a variety of functions to convert and partition Machine Learning datasets. This will help us prepare the MNIST data to be used with Knet.jl.

    using Images, MLDatasets, MLDataUtils
    srand(42);

    As you may have seen previously in the elastic distortions tutorial, the function MNIST.traintensor returns the MNIST training images corresponding to the given indices as a multi-dimensional array. These images are stored in the native horizontal-major memory layout as a single array. Because we specify that the eltype of that array should be Float32, all the individual values are scaled to be between 0.0 and 1.0. Also note how the observations are laid out along the last array dimension.

    @show summary(MNIST.traintensor(Float32, 1:500));
    summary(MNIST.traintensor(Float32, 1:500)) = "28×28×500 Array{Float32,3}"
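
    For example, a single observation can be pulled out by slicing along that last dimension and turned into a displayable image with MNIST.convert2image (a small sketch using only functions introduced in this tutorial; the index 1 and the name train_tensor are just for illustration):

    train_tensor = MNIST.traintensor(Float32, 1:500)
    img = train_tensor[:, :, 1]      # the first observation as a 28×28 Array{Float32,2}
    MNIST.convert2image(img)         # display the values as a gray image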

    The corresponding label of each image is stored as an integer value between 0 and 9. That means that if the label has the value 3, then the corresponding image is known to be a handwritten "3". To show a more concrete example, the following code reveals that the first training image denotes a "5" and the second training image a "0" (etc).

    @show summary(MNIST.trainlabels(1:500))
    println("First eight labels: ", join(MNIST.trainlabels(1:8),", "))
    summary(MNIST.trainlabels(1:500)) = "500-element Array{Int64,1}"
    First eight labels: 5, 0, 4, 1, 9, 2, 1, 3

    For Knet we will require a slightly different format for the images and also for the labels. More specifically, we add an additional singleton dimension of length 1 to our image array. Think of this as our single color channel (because MNIST images are gray). Additionally we will convert our labels to proper 1-based indices. This is because some functions provided by Knet expect the labels to be in this format. We will do all this by creating a little utility function that we will name prepare_mnist.

    """
        prepare_mnist(images, labels) -> (X, Y)

    Change the dimension layout x1×x2×N of the given array
    `images` to x1×x2×1×N and return the result as `X`.
    The given integer vector `labels` is transformed into
    an integer vector denoting 1-based class indices.
    """
    function prepare_mnist(images, labels)
        X = reshape(images, (28, 28, 1, :))
        Y = convertlabel(LabelEnc.Indices{Int8}, labels, 0:9)
        X, Y
    end
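
    To make the 1-based index mapping concrete, here is what the convertlabel call used above does to two raw labels (a minimal sketch; under the label order 0:9 the label 0 should map to index 1 and the label 5 to index 6):

    convertlabel(LabelEnc.Indices{Int8}, [5, 0], 0:9)   # expected result: Int8[6, 1]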

    With prepare_mnist defined, we can now use it in conjunction with the functions in the MLDatasets.MNIST sub-module to load and prepare our training set. Recall that for this tutorial only the first 500 images of the training set will be used.

    train_x, train_y = prepare_mnist(MNIST.traintensor(Float32, 1:500), MNIST.trainlabels(1:500))
    @show summary(train_x) summary(train_y);
    [MNIST.convert2image(train_x[:,:,1,i]) for i in 1:8]
    summary(train_x) = "28×28×1×500 Array{Float32,4}"
    summary(train_y) = "500-element Array{Int8,1}"

    training images

    Similarly, we use MNIST.testtensor and MNIST.testlabels to load the full MNIST test set. We will utilize that data to measure how well the network is able to generalize with and without augmentation.

    test_x, test_y = prepare_mnist(MNIST.testtensor(Float32), MNIST.testlabels())
    @show summary(test_x) summary(test_y);
    [MNIST.convert2image(test_x[:,:,1,i]) for i in 1:8]
    summary(test_x) = "28×28×1×10000 Array{Float32,4}"
    summary(test_y) = "10000-element Array{Int8,1}"

    test images

    Defining the Network

    With the dataset prepared, we can now define and instantiate our neural network. To keep things simple, we will use the same convolutional network as defined in the MNIST example of the Knet.jl package.

    using Knet

    The first thing we will do is define the forward pass through the network. This will effectively outline the computation graph of the network architecture. Note how this does not define some details, such as the number of neurons per layer. We will define those later when initializing our vector of weight arrays w.

    """
        forward(w, x) -> a

    Compute the forward pass for the given minibatch `x` by using the
    neural network parameters in `w`. The resulting (unnormalized)
    activations of the last layer are returned as `a`.
    """
    function forward(w, x)
        # conv1 (2x2 maxpool)
        a1 = pool(relu.(conv4(w[1], x)  .+ w[2]))
        # conv2 (2x2 maxpool)
        a2 = pool(relu.(conv4(w[3], a1) .+ w[4]))
        # dense1 (relu)
        a3 = relu.(w[5] * mat(a2) .+ w[6])
        # dense2 (identity)
        a4 = w[7] * a3 .+ w[8]
        return a4
    end

    In order to be able to train our network we need to choose a cost function. Because this is a classification problem we will use the negative log-likelihood (provided by Knet.nll). With the cost function defined, we can then simply use the higher-order function grad to create a new function costgrad that computes the corresponding gradients for us.

    """
        cost(w, x, y) -> AbstractFloat

    Compute the per-instance negative log-likelihood for the data
    in the minibatch `(x, y)` given the network with the current
    parameters in `w`.
    """
    cost(w, x, y) = nll(forward(w, x), y)
    costgrad = grad(cost)
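
    If you have not used grad before, its behaviour is easy to see on a toy function (a self-contained sketch; f and ∇f are illustrative names and not part of our model): grad takes a scalar-valued function and returns a new function that evaluates the gradient with respect to the first argument.

    f(x) = sum(abs2, x)         # f(x) = Σᵢ xᵢ², a scalar-valued function
    ∇f = grad(f)                # returns a function that computes ∂f/∂x
    ∇f([1.0, 2.0, 3.0])         # -> [2.0, 4.0, 6.0]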

    Aside from the cost function that we need for training, we would also like a more interpretable performance measurement. In this tutorial we will use "accuracy" for its simplicity and because we know that the class distribution for MNIST is close to uniform.

    """
        acc(w, X, Y; [batchsize]) -> Float64

    Compute the accuracy for the data in `(X,Y)` given the network
    with the current parameters in `w`. The resulting value is
    computed by iterating over the data in minibatches of size
    `batchsize`.
    """
    function acc(w, X, Y; batchsize = 100)
        sum = 0; count = 0
        for (x_cpu, y) in eachbatch((X, Y), maxsize = batchsize)
            x = KnetArray{Float32}(x_cpu)
            sum += Int(accuracy(forward(w,x), y, average = false))
            count += length(y)
        end
        return sum / count
    end

    Before we can train or even just use our network, we need to define how we initialize w, which is our vector of parameter arrays. The dimensions of these individual arrays specify the filter sizes and number of neurons. It can be helpful to compare the indices here with the indices used in our forward function to see which array corresponds to which computation node of our network.

    function weights(atype = KnetArray{Float32})
        w = Array{Any}(8)
        # conv1
        w[1] = xavier(5,5,1,20)
        w[2] = zeros(1,1,20,1)
        # conv2
        w[3] = xavier(5,5,20,50)
        w[4] = zeros(1,1,50,1)
        # dense1
        w[5] = xavier(500,800)
        w[6] = zeros(500,1)
        # dense2
        w[7] = xavier(10,500)
        w[8] = zeros(10,1)
        return map(a->convert(atype,a), w)
    end
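
    In case the 800 columns of w[5] look arbitrary: they follow directly from the shapes produced by forward. A quick back-of-the-envelope check (assuming the defaults of conv4 and pool, i.e. unpadded 5×5 convolutions and 2×2 max-pooling):

    conv_out(n, k) = n - k + 1       # unpadded convolution with a k×k filter
    pool_out(n)    = n ÷ 2           # 2×2 max-pooling
    s = pool_out(conv_out(pool_out(conv_out(28, 5)), 5))   # 28 → 24 → 12 → 8 → 4
    s * s * 50                       # 4*4*50 = 800 rows in mat(a2)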

    Training without Augmentation

    In order to get an intuition for how useful augmentation can be, we need a sensible baseline to compare to. To that end, we will first train the network we just defined using only the (unaltered) 500 training examples.

    The package ValueHistories.jl will help us record the accuracy during the training process. We will use those logs later to visualize the differences between having augmentation or no augmentation.

    using ValueHistories
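
    The only functionality we rely on is the MVHistory type together with the @trace macro used further down. Conceptually an MVHistory behaves like a keyed log of (iteration, value) pairs (a minimal sketch; the keys :train and :test mirror the variable names we trace below):

    log = MVHistory()
    push!(log, :train, 5, 0.55)      # store value 0.55 under key :train at iteration 5
    push!(log, :test,  5, 0.46)
    epochs, accs = get(log, :test)   # -> ([5], [0.46]), ready for plotting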

    To keep things simple, we will not overly optimize our training function. Thus, we will be content with using a closure. Because both the baseline and the augmented version will share this "inefficiency", we should still get a decent enough picture of their performance differences.

    function train_baseline(; epochs = 500, batchsize = 100, lr = .03)
        w = weights()
        log = MVHistory()
        for epoch in 1:epochs
            for (batch_x_cpu, batch_y) in eachbatch((train_x, train_y), batchsize)
                batch_x = KnetArray{Float32}(batch_x_cpu)
                g = costgrad(w, batch_x, batch_y)
                Knet.update!(w, g, lr = lr)
            end

            if (epoch % 5) == 0
                train = acc(w, train_x, train_y)
                test  = acc(w, test_x,  test_y)
                @trace log epoch train test
                msg = "epoch " * lpad(epoch,4) * ": train accuracy " * rpad(round(train,3),5,"0") * ", test accuracy " * rpad(round(test,3),5,"0")
                println(msg)
            end
        end
        log
    end

    Aside from the accuracy, we will also keep an eye on the training time. In particular we would like to see if and how the addition of augmentation causes our training time to increase.

    train_baseline(epochs=1) # warm-up
    baseline_log = @time train_baseline(epochs=200);
    epoch    5: train accuracy 0.550, test accuracy 0.460
    epoch   10: train accuracy 0.694, test accuracy 0.592
    epoch   15: train accuracy 0.820, test accuracy 0.749
    epoch   20: train accuracy 0.862, test accuracy 0.781
    epoch   25: train accuracy 0.890, test accuracy 0.815
    epoch   30: train accuracy 0.896, test accuracy 0.850
    epoch   35: train accuracy 0.920, test accuracy 0.866
    epoch   40: train accuracy 0.930, test accuracy 0.875
    epoch   45: train accuracy 0.940, test accuracy 0.882
    epoch   50: train accuracy 0.954, test accuracy 0.885
    epoch   55: train accuracy 0.964, test accuracy 0.889
    epoch   60: train accuracy 0.968, test accuracy 0.891
    epoch   65: train accuracy 0.972, test accuracy 0.893
    epoch   70: train accuracy 0.978, test accuracy 0.895
    epoch   75: train accuracy 0.982, test accuracy 0.896
    epoch   80: train accuracy 0.988, test accuracy 0.898
    epoch   85: train accuracy 0.994, test accuracy 0.899
    epoch   90: train accuracy 0.996, test accuracy 0.899
    epoch   95: train accuracy 0.998, test accuracy 0.901
    epoch  100: train accuracy 1.000, test accuracy 0.901
    epoch  105: train accuracy 1.000, test accuracy 0.902
    epoch  110: train accuracy 1.000, test accuracy 0.902
    epoch  115: train accuracy 1.000, test accuracy 0.902
    epoch  120: train accuracy 1.000, test accuracy 0.902
    epoch  125: train accuracy 1.000, test accuracy 0.903
    epoch  130: train accuracy 1.000, test accuracy 0.902
    epoch  135: train accuracy 1.000, test accuracy 0.904
    epoch  140: train accuracy 1.000, test accuracy 0.903
    epoch  145: train accuracy 1.000, test accuracy 0.903
    epoch  150: train accuracy 1.000, test accuracy 0.903
    epoch  155: train accuracy 1.000, test accuracy 0.903
    epoch  160: train accuracy 1.000, test accuracy 0.903
    epoch  165: train accuracy 1.000, test accuracy 0.903
    epoch  170: train accuracy 1.000, test accuracy 0.903
    epoch  175: train accuracy 1.000, test accuracy 0.903
    epoch  180: train accuracy 1.000, test accuracy 0.903
    epoch  185: train accuracy 1.000, test accuracy 0.903
    epoch  190: train accuracy 1.000, test accuracy 0.903
    epoch  195: train accuracy 1.000, test accuracy 0.903
    epoch  200: train accuracy 1.000, test accuracy 0.902
      7.121126 seconds (3.18 M allocations: 274.946 MiB, 1.54% gc time)

    As we can see, the accuracy on the training set is around 100%, while the accuracy on the test set peaks around 90%. For a mere 500 training examples, this isn't actually that bad of a result.

    Integrating Augmentor

Now that we have a network architecture and a baseline to compare against, let us finally see what it takes to add Augmentor to our experiment. First, we need to load the package into our experiment.

    using Augmentor

The next step, and maybe the most time-consuming part (in terms of human hours) of adding image augmentation to a prediction problem, is to design and select a sensible augmentation pipeline. Take a look at the elastic distortions tutorial for an example of how to do just that.

For this example, we have already chosen a rather involved but promising augmentation pipeline for you. This pipeline was designed to yield a large variety of effects as well as to showcase how even deep pipelines remain quite efficient in terms of performance.

    pl = Reshape(28,28) |>
    -     PermuteDims(2,1) |>
    -     ShearX(-5:5) * ShearY(-5:5) |>
    -     Rotate(-15:15) |>
    -     CropSize(28,28) |>
    -     Zoom(0.9:0.1:1.2) |>
    -     CacheImage() |>
    -     ElasticDistortion(10) |>
    -     PermuteDims(2,1) |>
    -     Reshape(28,28,1)
    10-step Augmentor.ImmutablePipeline:
    -  1.) Reshape array to 28×28
    -  2.) Permute dimension order to (2, 1)
    -  3.) Either: (50%) ShearX by ϕ ∈ -5:5 degree. (50%) ShearY by ψ ∈ -5:5 degree.
    -  4.) Rotate by θ ∈ -15:15 degree
    -  5.) Crop a 28×28 window around the center
    -  6.) Zoom by I ∈ {0.9×0.9, 1.0×1.0, 1.1×1.1, 1.2×1.2}
    -  7.) Cache into temporary buffer
    -  8.) Distort using a smoothed and normalized 10×10 grid with pinned border
    -  9.) Permute dimension order to (2, 1)
    - 10.) Reshape array to 28×28×1

Most of the operations used here are quite self-explanatory, but there are some details about this pipeline worth pointing out explicitly.

1. We use the operation PermuteDims to convert the horizontal-major MNIST image to a Julia-native vertical-major image. The vertical-major image is then processed and converted back to a horizontal-major array. We mainly do this here to showcase the option, but it also keeps us consistent with how the data is usually presented in the literature. Alternatively, one could simply work with the MNIST data in a vertical-major format all the way through without any issue.

2. As counter-intuitive as it may sound, the operation CacheImage right before ElasticDistortion is actually there to improve performance. If we were to omit it, the whole pipeline would be applied lazily in one single pass. In this case, applying distortions on top of lazy affine transformations is in fact less efficient than materializing the intermediate result into a temporary buffer first, as the sketch below illustrates.
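
To convince yourself of this effect, you could time both variants directly. The following is only a rough sketch: the two pipelines are trimmed-down versions of the one above, the first training image serves as input, and the exact timings will of course depend on your machine.

    img = reshape(train_x[:,:,:,1], (28, 28))  # first training image as a plain 28×28 array
    pl_lazy   = Rotate(-15:15) |> CropSize(28,28) |> ElasticDistortion(10)
    pl_cached = Rotate(-15:15) |> CropSize(28,28) |> CacheImage() |> ElasticDistortion(10)
    augment(img, pl_lazy); augment(img, pl_cached);  # warm-up (compilation)
    @time for i in 1:1000; augment(img, pl_lazy); end
    @time for i in 1:1000; augment(img, pl_cached); end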

With the pipeline now defined, let us quickly take a peek at the kind of effects we can achieve with it. In particular, let's apply the pipeline multiple times to the first training image and look at the kind of results it produces.

    [MNIST.convert2image(reshape(augment(train_x[:,:,:,1], pl), (28, 28))) for i in 1:8, j in 1:2]

    augmented samples

    As we can see, we can achieve a wide range of effects, from more subtle to more pronounced. The important part is that all examples are still clearly representative of the true label.

    Next, we have to adapt the function train_baseline to make use of our augmentation pipeline. To integrate Augmentor efficiently, there are three necessary changes we have to make.

    1. Preallocate a buffer with the same size and element type that each batch has.

      batch_x_aug = zeros(Float32, 28, 28, 1, batchsize)
2. Add a call to augmentbatch! in the inner loop of the batch iterator using our pipeline and buffer.

  augmentbatch!(batch_x_aug, batch_x_cpu, pl)
3. Replace batch_x_cpu with batch_x_aug in the constructor of KnetArray.

      batch_x = KnetArray{Float32}(batch_x_aug)

    Applying these changes to our train_baseline function will give us something similar to the following function. Note how all the other parts of the function remain exactly the same as before.

    function train_augmented(; epochs = 500, batchsize = 100, lr = .03)
    -    w = weights()
    -    log = MVHistory()
    -    batch_x_aug = zeros(Float32, size(train_x,1), size(train_x,2), 1, batchsize)
    -    for epoch in 1:epochs
    -        for (batch_x_cpu, batch_y) in eachbatch((train_x ,train_y), batchsize)
    -            augmentbatch!(CPUThreads(), batch_x_aug, batch_x_cpu, pl)
    -            batch_x = KnetArray{Float32}(batch_x_aug)
    -            g = costgrad(w, batch_x, batch_y)
    -            Knet.update!(w, g, lr = lr)
    -        end
    -
    -        if (epoch % 5) == 0
    -            train = acc(w, train_x, train_y)
    -            test  = acc(w, test_x,  test_y)
    -            @trace log epoch train test
    -            msg = "epoch " * lpad(epoch,4) * ": train accuracy " * rpad(round(train,3),5,"0") * ", test accuracy " * rpad(round(test,3),5,"0")
    -            println(msg)
    -        end
    -    end
    -    log
    -end

You may have noticed in the code above that we also pass CPUThreads() as the first argument to augmentbatch!. This instructs Augmentor to process the images of the batch in parallel using multi-threading. For this to work properly, you will need to set the environment variable JULIA_NUM_THREADS to the number of threads you wish to use before starting Julia. You can check how many threads are being used with the function Threads.nthreads().

    @show Threads.nthreads();
    Threads.nthreads() = 10
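
If Threads.nthreads() reports 1 on your machine, the environment variable was most likely not set before Julia was started. The following is just a sketch of how this could look; the thread count of 4 is an arbitrary example and should be adapted to your CPU.

    # From a Unix-like shell, launch Julia with e.g. 4 threads:
    #   JULIA_NUM_THREADS=4 julia
    # Inside the running session we can then double-check the setting:
    get(ENV, "JULIA_NUM_THREADS", "not set")
    Threads.nthreads()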

    Now that all pieces are in place, let us train our network once more. We will use the same parameters except that now instead of the original training images we will be using randomly augmented images. This will cause every epoch to be different.

    train_augmented(epochs=1) # warm-up
    -augmented_log = @time train_augmented(epochs=200);
    epoch    5: train accuracy 0.526, test accuracy 0.464
    -epoch   10: train accuracy 0.646, test accuracy 0.559
    -epoch   15: train accuracy 0.742, test accuracy 0.684
    -epoch   20: train accuracy 0.786, test accuracy 0.732
    -epoch   25: train accuracy 0.846, test accuracy 0.798
    -epoch   30: train accuracy 0.864, test accuracy 0.823
    -epoch   35: train accuracy 0.872, test accuracy 0.833
    -epoch   40: train accuracy 0.896, test accuracy 0.869
    -epoch   45: train accuracy 0.908, test accuracy 0.881
    -epoch   50: train accuracy 0.918, test accuracy 0.890
    -epoch   55: train accuracy 0.922, test accuracy 0.891
    -epoch   60: train accuracy 0.926, test accuracy 0.897
    -epoch   65: train accuracy 0.936, test accuracy 0.911
    -epoch   70: train accuracy 0.946, test accuracy 0.899
    -epoch   75: train accuracy 0.936, test accuracy 0.898
    -epoch   80: train accuracy 0.950, test accuracy 0.916
    -epoch   85: train accuracy 0.924, test accuracy 0.881
    -epoch   90: train accuracy 0.958, test accuracy 0.921
    -epoch   95: train accuracy 0.968, test accuracy 0.933
    -epoch  100: train accuracy 0.976, test accuracy 0.928
    -epoch  105: train accuracy 0.982, test accuracy 0.932
    -epoch  110: train accuracy 0.982, test accuracy 0.925
    -epoch  115: train accuracy 0.986, test accuracy 0.934
    -epoch  120: train accuracy 0.982, test accuracy 0.932
    -epoch  125: train accuracy 0.992, test accuracy 0.946
    -epoch  130: train accuracy 0.992, test accuracy 0.944
    -epoch  135: train accuracy 0.992, test accuracy 0.940
    -epoch  140: train accuracy 0.988, test accuracy 0.930
    -epoch  145: train accuracy 0.990, test accuracy 0.943
    -epoch  150: train accuracy 0.992, test accuracy 0.936
    -epoch  155: train accuracy 0.992, test accuracy 0.949
    -epoch  160: train accuracy 0.996, test accuracy 0.945
    -epoch  165: train accuracy 0.992, test accuracy 0.948
    -epoch  170: train accuracy 0.992, test accuracy 0.928
    -epoch  175: train accuracy 0.998, test accuracy 0.948
    -epoch  180: train accuracy 0.998, test accuracy 0.952
    -epoch  185: train accuracy 0.998, test accuracy 0.942
    -epoch  190: train accuracy 0.996, test accuracy 0.948
    -epoch  195: train accuracy 0.998, test accuracy 0.949
    -epoch  200: train accuracy 0.998, test accuracy 0.953
    - 26.931174 seconds (38.83 M allocations: 21.677 GiB, 13.46% gc time)

As we can see, our network reaches far better results on the test set than our baseline network did. However, we can also see that the training took quite a bit longer than before. This difference generally shrinks as the complexity of the neural network increases. Yet another way to improve performance (aside from simplifying the augmentation pipeline) would be to increase the number of available threads.

    Improving Performance

One of the most effective ways to make the most of the available resources is to augment the next couple of mini-batches while the current mini-batch is being processed on the GPU. We can do this using Julia's built-in parallel computing capabilities.

First, we need a worker process that will be responsible for augmenting our dataset each epoch. This worker also needs access to a couple of our packages.

    # addprocs(1)
    -# @everywhere using Augmentor, MLDataUtils
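# addprocs(1) starts one additional worker process, and @everywhere ensures that this
# worker also loads Augmentor and MLDataUtils. Run these once at the beginning of your
# session, before defining the training function below.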

Next, we replace the inner eachbatch loop with a more elaborate version that uses a RemoteChannel to exchange and queue the augmented data.

    function async_train_augmented(; epochs = 500, batchsize = 100, lr = .03)
    -    w = weights()
    -    log = MVHistory()
    -    for epoch in 1:epochs
    -        @sync begin
-            local_ch = Channel{Tuple}(4) # prepare up to 4 minibatches in advance
    -            remote_ch = RemoteChannel(()->local_ch)
    -            @spawn begin
    -                # This block is executed on the worker process
    -                batch_x_aug = zeros(Float32, size(train_x,1), size(train_x,2), 1, batchsize)
    -                for (batch_x_cpu, batch_y) in eachbatch((train_x ,train_y), batchsize)
    -                    # we are still using multithreading
    -                    augmentbatch!(CPUThreads(), batch_x_aug, batch_x_cpu, pl)
    -                    put!(remote_ch, (batch_x_aug, batch_y))
    -                end
    -                close(remote_ch)
    -            end
    -            @async begin
    -                # This block is executed on the main process
    -                for (batch_x_aug, batch_y) in local_ch
    -                    batch_x = KnetArray{Float32}(batch_x_aug)
    -                    g = costgrad(w, batch_x, batch_y)
    -                    Knet.update!(w, g, lr = lr)
    -                end
    -            end
    -        end
    -
    -        if (epoch % 5) == 0
    -            train = acc(w, train_x, train_y)
    -            test  = acc(w, test_x,  test_y)
    -            @trace log epoch train test
    -            msg = "epoch " * lpad(epoch,4) * ": train accuracy " * rpad(round(train,3),5,"0") * ", test accuracy " * rpad(round(test,3),5,"0")
    -            println(msg)
    -        end
    -    end
    -    log
    -end

Note that for this toy example the overhead of this approach outweighs its benefit; a quick way to check this on your own setup is sketched below.
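
The following sketch simply times a short run of both training variants back to back. The epoch count of 20 is an arbitrary choice, and both functions are assumed to be defined as above (with the worker process for the asynchronous version already added).

    train_augmented(epochs=1); async_train_augmented(epochs=1)  # warm-up
    @time train_augmented(epochs=20);
    @time async_train_augmented(epochs=20);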

    Visualizing the Results

Before we end this tutorial, let us make use of the Plots.jl package to visualize and discuss the recorded training curves. We will plot the accuracy curves of both networks side by side in order to get a good feeling for their differences.

    using Plots
    -pyplot()
    plt = plot(
    -    plot(baseline_log,  title="Baseline",  ylim=(.5,1)),
    -    plot(augmented_log, title="Augmented", ylim=(.5,1)),
    -    size = (900, 400),
    -    xlab = "Epoch",
    -    ylab = "Accuracy",
    -    markersize = 1
    -)

    learning curves
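
In case you would like to keep the figure around, for example to embed it in a report, Plots.jl can also write it to disk. The filename below is merely a suggestion.

    savefig(plt, "mnist_knet_curves.png")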

Note how the accuracy on the (unaltered) training set increases faster for the baseline network than for the augmented one. This is to be expected, since our augmented network doesn't actually use the unaltered images for training and thus has never seen them. Given this, it is worth pointing out explicitly that the accuracy on the training set is still greater than on the test set for the augmented network as well. This is also not a surprise, given that the augmented images are likely more similar to their originals than to the test images.

For the baseline network, the accuracy on the test set plateaus quite quickly (around 90%). For the augmented network, on the other hand, the accuracy keeps increasing for quite a while longer.

    References

    [MNIST1998]

LeCun, Yann, Corinna Cortes, Christopher J.C. Burges. "The MNIST database of handwritten digits" Website. 1998.

    diff --git a/generated/mnist_knet_aug.png b/generated/mnist_knet_aug.png deleted file mode 100644 index 24e96f2a..00000000 Binary files a/generated/mnist_knet_aug.png and /dev/null differ diff --git a/generated/mnist_knet_curves.png b/generated/mnist_knet_curves.png deleted file mode 100644 index e6c20aa5..00000000 Binary files a/generated/mnist_knet_curves.png and /dev/null differ diff --git a/generated/mnist_knet_test.png b/generated/mnist_knet_test.png deleted file mode 100644 index 8639231b..00000000 Binary files a/generated/mnist_knet_test.png and /dev/null differ diff --git a/generated/mnist_knet_train.png b/generated/mnist_knet_train.png deleted file mode 100644 index 8f9536cc..00000000 Binary files a/generated/mnist_knet_train.png and /dev/null differ diff --git a/generated/mnist_tensorflow.ipynb b/generated/mnist_tensorflow.ipynb deleted file mode 100644 index e09c9c00..00000000 --- a/generated/mnist_tensorflow.ipynb +++ /dev/null @@ -1,942 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# MNIST: TensorFlow CNN\n", - "\n", - "In this tutorial we will adapt the\n", - "[MNIST example](https://github.com/malmaud/TensorFlow.jl/blob/master/examples/mnist_full.jl)\n", - "from [TensorFlow.jl](https://github.com/malmaud/TensorFlow.jl)\n", - "to utilize a custom augmentation pipeline.\n", - "In order to showcase the effect that image augmentation can\n", - "have on a neural network's ability to generalize, we will\n", - "limit the training set to just the first 500 images (of the\n", - "available 60,000!). For more information on the dataset see\n", - "[MNIST1998].\n", - "\n", - "\n", - "## Preparing the MNIST dataset\n", - "\n", - "In order to access, prepare, and visualize the MNIST images we\n", - "employ the help of three additional Julia packages. In the\n", - "interest of time and space we will not go into great detail\n", - "about their functionality. Feel free to click on their\n", - "respective names to find out more information about the\n", - "utility they can provide.\n", - "\n", - "- [MLDatasets.jl](https://github.com/JuliaML/MLDatasets.jl)\n", - " has an MNIST submodule that offers a convenience interface\n", - " to read the MNIST database.\n", - "\n", - "- [Images.jl](https://github.com/JuliaImages/Images.jl) will\n", - " provide us with the necessary tools to process and display\n", - " the image data in Julia / Juypter.\n", - "\n", - "- [MLDataUtils.jl](https://github.com/JuliaML/MLDataUtils.jl)\n", - " implements a variety of functions to convert and partition\n", - " Machine Learning datasets. This will help us prepare the\n", - " MNIST data to be used with TensorFlow." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "using Images, MLDatasets, MLDataUtils\n", - "srand(42);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As you may have seen previously in the\n", - "[elastic distortions tutorial](@ref elastic), the function\n", - "`MNIST.traintensor` returns the MNIST training images\n", - "corresponding to the given indices as a multi-dimensional\n", - "array. These images are stored in the native horizontal-major\n", - "memory layout as a single array of `Float64`. 
All the\n", - "individual values are scaled to be between `0.0` and `1.0`.\n", - "Also note, how the observations are laid out along the last\n", - "array dimension" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(MNIST.traintensor(1:500)) = \"28×28×500 Array{Float64,3}\"\n" - ] - } - ], - "source": [ - "@show summary(MNIST.traintensor(1:500));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The corresponding label of each image is stored as an integer\n", - "value between `0` and `9`. That means that if the label has\n", - "the value `3`, then the corresponding image is known to be a\n", - "handwritten \"3\". To show a more concrete example, the\n", - "following code reveals that the first training image denotes a\n", - "\"5\" and the second training image a \"0\" (etc)." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(MNIST.trainlabels(1:500)) = \"500-element Array{Int64,1}\"\n", - "First eight labels: 5, 0, 4, 1, 9, 2, 1, 3\n" - ] - } - ], - "source": [ - "@show summary(MNIST.trainlabels(1:500))\n", - "println(\"First eight labels: \", join(MNIST.trainlabels(1:8),\", \"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For TensorFlow we will require a slightly different dimension\n", - "layout for the images. More specifically, we will move the\n", - "observations into the first array dimension. The labels will\n", - "be transformed into a one-of-k matrix. For performance reasons,\n", - "we will further convert all the numerical values to be of type\n", - "`Float32`. We will do all this by creating a little utility\n", - "function that we will name `prepare_mnist`." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "prepare_mnist" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "\"\"\"\n", - " prepare_mnist(tensor, labels) -> (X, Y)\n", - "\n", - "Change the dimension layout x1×x2×N of the given array\n", - "`tensor` to N×x1×x2 and store the result in `X`.\n", - "The given vector `labels` is transformed into a 10×N\n", - "one-hot matrix `Y`. Both, `X` and `Y`, will have the\n", - "element type `Float32`.\n", - "\"\"\"\n", - "function prepare_mnist(tensor, labels)\n", - " features = convert(Array{Float32}, permutedims(tensor, (3,1,2)))\n", - " targets = convertlabel(LabelEnc.OneOfK{Float32}, labels, 0:9, ObsDim.First())\n", - " features, targets\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With `prepare_mnist` defined, we can now use it in conjunction\n", - "with the functions in the `MLDatasets.MNIST` sub-module to load\n", - "and prepare our training set. Recall that for this tutorial only\n", - "use the first 500 images of the training set will be used." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(train_x) = \"500×28×28 Array{Float32,3}\"\n", - "summary(train_y) = \"500×10 Array{Float32,2}\"\n" - ] - }, - { - "data": { - "text/html": [ - "
    " - ], - "text/plain": [ - "8-element Array{Array{ColorTypes.Gray{Float32},2},1}:\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "train_x, train_y = prepare_mnist(MNIST.traintensor(1:500), MNIST.trainlabels(1:500))\n", - "@show summary(train_x) summary(train_y);\n", - "[MNIST.convert2image(train_x[i,:,:]) for i in 1:8]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Similarly, we use `MNIST.testtensor` and `MNIST.testlabels`\n", - "to load the full MNIST test set. We will utilize that data to\n", - "measure how well the network is able to generalize with and\n", - "without augmentation." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "summary(test_x) = \"10000×28×28 Array{Float32,3}\"\n", - "summary(test_y) = \"10000×10 Array{Float32,2}\"\n" - ] - }, - { - "data": { - "text/html": [ - "
    " - ], - "text/plain": [ - "8-element Array{Array{ColorTypes.Gray{Float32},2},1}:\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "test_x, test_y = prepare_mnist(MNIST.testtensor(), MNIST.testlabels())\n", - "@show summary(test_x) summary(test_y);\n", - "[MNIST.convert2image(test_x[i,:,:]) for i in 1:8]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining the Network\n", - "\n", - "With the dataset prepared, we can now instantiate our neural\n", - "network. 
To keep things simple, we will use the same\n", - "convolutional network as defined in the\n", - "[MNIST example](https://github.com/malmaud/TensorFlow.jl/blob/master/examples/mnist_full.jl)\n", - "of Julia's TensorFlow package." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2017-09-29 02:28:54.313988: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.\n", - "2017-09-29 02:28:54.314009: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.\n", - "2017-09-29 02:28:54.314013: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.\n", - "2017-09-29 02:28:54.314028: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations.\n", - "2017-09-29 02:28:54.314031: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations.\n", - "2017-09-29 02:28:54.654851: I tensorflow/core/common_runtime/gpu/gpu_device.cc:955] Found device 0 with properties: \n", - "name: Quadro M6000 24GB\n", - "major: 5 minor: 2 memoryClockRate (GHz) 1.114\n", - "pciBusID 0000:02:00.0\n", - "Total memory: 23.86GiB\n", - "Free memory: 23.48GiB\n", - "2017-09-29 02:28:54.654870: I tensorflow/core/common_runtime/gpu/gpu_device.cc:976] DMA: 0 \n", - "2017-09-29 02:28:54.654874: I tensorflow/core/common_runtime/gpu/gpu_device.cc:986] 0: Y \n", - "2017-09-29 02:28:54.654882: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1045] Creating TensorFlow device (/gpu:0) -> (device: 0, name: Quadro M6000 24GB, pci bus id: 0000:02:00.0)\n" - ] - } - ], - "source": [ - "using TensorFlow, Distributions\n", - "session = Session(Graph());" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "max_pool_2x2 (generic function with 1 method)" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function weight_variable(shape...)\n", - " initial = map(Float32, rand(Normal(0, .001), shape...))\n", - " return Variable(initial)\n", - "end\n", - "\n", - "function bias_variable(shape...)\n", - " initial = fill(Float32(.1), shape...)\n", - " return Variable(initial)\n", - "end\n", - "\n", - "function conv2d(x, W)\n", - " nn.conv2d(x, W, [1, 1, 1, 1], \"SAME\")\n", - "end\n", - "\n", - "function max_pool_2x2(x)\n", - " nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], \"SAME\")\n", - "end" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "@tf begin\n", - " x = placeholder(Float32)\n", - " y = placeholder(Float32)\n", - "\n", - " W_conv1 = weight_variable(5, 5, 1, 32)\n", - " b_conv1 = bias_variable(32)\n", - "\n", - " x_image = reshape(x, [-1, 28, 28, 
1])\n", - "\n", - " h_conv1 = nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n", - " h_pool1 = max_pool_2x2(h_conv1)\n", - "\n", - " W_conv2 = weight_variable(5, 5, 32, 64)\n", - " b_conv2 = bias_variable(64)\n", - "\n", - " h_conv2 = nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n", - " h_pool2 = max_pool_2x2(h_conv2)\n", - "\n", - " W_fc1 = weight_variable(7*7*64, 1024)\n", - " b_fc1 = bias_variable(1024)\n", - "\n", - " h_pool2_flat = reshape(h_pool2, [-1, 7*7*64])\n", - " h_fc1 = nn.relu(h_pool2_flat * W_fc1 + b_fc1)\n", - "\n", - " keep_prob = placeholder(Float32)\n", - " h_fc1_drop = nn.dropout(h_fc1, keep_prob)\n", - "\n", - " W_fc2 = weight_variable(1024, 10)\n", - " b_fc2 = bias_variable(10)\n", - "\n", - " y_conv = nn.softmax(h_fc1_drop * W_fc2 + b_fc2)\n", - "\n", - " global cross_entropy = reduce_mean(-reduce_sum(y.*log(y_conv+1e-8), axis=[2]))\n", - " global optimizer = train.minimize(train.AdamOptimizer(1e-4), cross_entropy)\n", - "\n", - " correct_prediction = broadcast(==, indmax(y_conv, 2), indmax(y, 2))\n", - " global accuracy = reduce_mean(cast(correct_prediction, Float32))\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Training without Augmentation\n", - "\n", - "In order to get an intuition for how useful augmentation can\n", - "be, we need a sensible baseline to compare to. To that end, we\n", - "will first train the network we just defined using only the\n", - "(unaltered) 500 training examples.\n", - "\n", - "The package\n", - "[ValueHistories.jl](https://github.com/JuliaML/ValueHistories.jl)\n", - "will help us record the accuracy during the training process.\n", - "We will use those logs later to visualize the differences\n", - "between having augmentation or no augmentation." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "using ValueHistories" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To keep things simple, we will not overly optimize our\n", - "training function. Thus, we will be content with using a\n", - "closure. Because both, the baseline and the augmented version,\n", - "will share this \"inefficiency\", we should still get a decent\n", - "enough picture of their performance differences." 
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "train_baseline (generic function with 1 method)" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function train_baseline(; epochs=500, batchsize=100, reset=true)\n", - " reset && run(session, global_variables_initializer())\n", - " log = MVHistory()\n", - " for epoch in 1:epochs\n", - " for (batch_x, batch_y) in eachbatch(shuffleobs((train_x, train_y), obsdim=1), size=batchsize, obsdim=1)\n", - " run(session, optimizer, Dict(x=>batch_x, y=>batch_y, keep_prob=>0.5))\n", - " end\n", - "\n", - " if (epoch % 50) == 0\n", - " train = run(session, accuracy, Dict(x=>train_x, y=>train_y, keep_prob=>1.0))\n", - " test = run(session, accuracy, Dict(x=>test_x, y=>test_y, keep_prob=>1.0))\n", - " @trace log epoch train test\n", - " msg = \"epoch \" * lpad(epoch,4) * \": train accuracy \" * rpad(round(train,3),5,\"0\") * \", test accuracy \" * rpad(round(test,3),5,\"0\")\n", - " println(msg)\n", - " end\n", - " end\n", - " log\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Aside from the accuracy, we will also keep an eye on the\n", - "training time. In particular we would like to see if and how\n", - "the addition of augmentation causes our training time to\n", - "increase." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 50: train accuracy 0.658, test accuracy 0.570\n", - "epoch 100: train accuracy 0.846, test accuracy 0.749\n", - "epoch 150: train accuracy 0.878, test accuracy 0.781\n", - "epoch 200: train accuracy 0.906, test accuracy 0.807\n", - "epoch 250: train accuracy 0.930, test accuracy 0.819\n", - "epoch 300: train accuracy 0.950, test accuracy 0.824\n", - "epoch 350: train accuracy 0.962, test accuracy 0.829\n", - "epoch 400: train accuracy 0.980, test accuracy 0.835\n", - "epoch 450: train accuracy 0.992, test accuracy 0.834\n", - "epoch 500: train accuracy 0.994, test accuracy 0.832\n", - "epoch 550: train accuracy 0.998, test accuracy 0.835\n", - "epoch 600: train accuracy 1.000, test accuracy 0.836\n", - "epoch 650: train accuracy 1.000, test accuracy 0.836\n", - "epoch 700: train accuracy 1.000, test accuracy 0.838\n", - "epoch 750: train accuracy 1.000, test accuracy 0.836\n", - "epoch 800: train accuracy 1.000, test accuracy 0.843\n", - "epoch 850: train accuracy 1.000, test accuracy 0.834\n", - "epoch 900: train accuracy 1.000, test accuracy 0.839\n", - "epoch 950: train accuracy 1.000, test accuracy 0.839\n", - "epoch 1000: train accuracy 1.000, test accuracy 0.840\n", - " 59.346103 seconds (3.15 M allocations: 2.579 GiB, 0.95% gc time)\n" - ] - } - ], - "source": [ - "train_baseline(epochs=1) # warm-up\n", - "baseline_log = @time train_baseline(epochs=1000);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, the accuracy on the training set is around a\n", - "100%, while the accuracy on the test set peaks around 85%. For\n", - "a mere 500 training examples, this isn't actually that bad of\n", - "a result.\n", - "\n", - "## Integrating Augmentor\n", - "\n", - "Now that we have a network architecture with a baseline to\n", - "compare to, let us finally see what it takes to add Augmentor\n", - "to our experiment. First, we need to include the package to\n", - "our experiment." 
- ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "using Augmentor" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The next step, and maybe the most human-hour consuming part of\n", - "adding image augmentation to a prediction problem, is to\n", - "design and select a sensible augmentation pipeline. Take a\n", - "look at the [elastic distortions tutorial](@ref elastic) for\n", - "an example of how to do just that.\n", - "\n", - "For this example, we already choose a quite complicated but\n", - "promising augmentation pipeline for you. This pipeline was\n", - "designed to yield a large variation of effects as well as to\n", - "showcase how even deep pipelines are quite efficient in terms\n", - "of performance." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "8-step Augmentor.ImmutablePipeline:\n", - " 1.) Permute dimension order to (2, 1)\n", - " 2.) Either: (50%) ShearX by ϕ ∈ -5:5 degree. (50%) ShearY by ψ ∈ -5:5 degree.\n", - " 3.) Rotate by θ ∈ -15:15 degree\n", - " 4.) Crop a 28×28 window around the center\n", - " 5.) Zoom by I ∈ {0.9×0.9, 1.0×1.0, 1.1×1.1, 1.2×1.2}\n", - " 6.) Cache into temporary buffer\n", - " 7.) Distort using a smoothed and normalized 10×10 grid with pinned border\n", - " 8.) Permute dimension order to (2, 1)" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "pl = PermuteDims(2,1) |>\n", - " ShearX(-5:5) * ShearY(-5:5) |>\n", - " Rotate(-15:15) |>\n", - " CropSize(28,28) |>\n", - " Zoom(0.9:0.1:1.2) |>\n", - " CacheImage() |>\n", - " ElasticDistortion(10) |>\n", - " PermuteDims(2,1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Most of the used operations are quite self explanatory, but\n", - "there are some details about this pipeline worth pointing out\n", - "explicitly.\n", - "\n", - "1. We use the operation [`PermuteDims`](@ref) to convert the\n", - " horizontal-major MNIST image to a julia-native\n", - " vertical-major image. The vertical-major image is then\n", - " processed and converted back to a horizontal-major array.\n", - " We mainly do this here to showcase the option, but it is\n", - " also to keep consistent with how the data is usually used\n", - " in the literature. Alternatively, one could just work with\n", - " the MNIST data in a vertical-major format all the way\n", - " through without any issue.\n", - "\n", - "2. As counter-intuitive as it sounds, the operation\n", - " [`CacheImage`](@ref) right before\n", - " [`ElasticDistortion`](@ref) is actually used to improve\n", - " performance. If we were to omit it, then the whole pipeline\n", - " would be applied in one single pass. In this case, applying\n", - " distortions on top of affine transformations lazily is in\n", - " fact less efficient than using a temporary variable.\n", - "\n", - "With the pipeline now defined, let us quickly peek at what\n", - "kind of effects we can achieve with it. In particular, lets\n", - "apply the pipeline multiple times to the first training image\n", - "and look at what kind of results it produces." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
    " - ], - "text/plain": [ - "8×2 Array{Array{ColorTypes.Gray{Float32},2},2}:\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] … ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] \n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(0.971794) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] \n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] \n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] \n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … 
Gray{Float32}(1.0) Gray{Float32}(1.0)] \n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] … ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] \n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(0.969688) Gray{Float32}(0.87171) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)]\n", - " ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] ColorTypes.Gray{Float32}[Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); … ; Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0); Gray{Float32}(1.0) Gray{Float32}(1.0) … Gray{Float32}(1.0) Gray{Float32}(1.0)] " - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "[MNIST.convert2image(augment(train_x[1,:,:], pl)) for i in 1:8, j in 1:2]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, we can achieve a wide range of effects, from\n", - "more subtle to more pronounced. The important part is that all\n", - "examples are still clearly representative of the true label.\n", - "\n", - "Next, we have to adapt the function `train_baseline` to make\n", - "use of our augmentation pipeline. To integrate Augmentor\n", - "efficiently, there are three necessary changes we have to\n", - "make.\n", - "\n", - "1. Preallocate a buffer with the same size and element type\n", - " that each batch has.\n", - "\n", - " ```\n", - " augmented_x = zeros(Float32, batchsize, 28, 28)\n", - " ```\n", - "\n", - "2. Add a call to [`augmentbatch!`](@ref) in the inner loop of\n", - " the batch iterator using our pipeline and buffer.\n", - "\n", - " ```\n", - " augmentbatch!(augmented_x, batch_x, pl, ObsDim.First())\n", - " ```\n", - "\n", - "3. 
Replace `x=>batch_x` with `x=>augmented_x` in the call to\n", - " TensorFlow's `run(session, ...)`.\n", - "\n", - "Applying these changes to our `train_baseline` function\n", - "will give us something similar to the following function.\n", - "Note how all the other parts of the function remain exactly\n", - "the same as before." - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "train_augmented (generic function with 1 method)" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "function train_augmented(; epochs=500, batchsize=100, reset=true)\n", - " reset && run(session, global_variables_initializer())\n", - " log = MVHistory()\n", - " augm_x = zeros(Float32, batchsize, size(train_x,2), size(train_x,3))\n", - " for epoch in 1:epochs\n", - " for (batch_x, batch_y) in eachbatch(shuffleobs((train_x, train_y), obsdim=1), size=batchsize, obsdim=1)\n", - " augmentbatch!(CPUThreads(), augm_x, batch_x, pl, ObsDim.First())\n", - " run(session, optimizer, Dict(x=>augm_x, y=>batch_y, keep_prob=>0.5))\n", - " end\n", - "\n", - " if (epoch % 50) == 0\n", - " train = run(session, accuracy, Dict(x=>train_x, y=>train_y, keep_prob=>1.0))\n", - " test = run(session, accuracy, Dict(x=>test_x, y=>test_y, keep_prob=>1.0))\n", - " @trace log epoch train test\n", - " msg = \"epoch \" * lpad(epoch,4) * \": train accuracy \" * rpad(round(train,3),5,\"0\") * \", test accuracy \" * rpad(round(test,3),5,\"0\")\n", - " println(msg)\n", - " end\n", - " end\n", - " log\n", - "end" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You may have noticed in the code above that we also pass a\n", - "`CPUThreads()` as the first argument to [`augmentbatch!`](@ref).\n", - "This instructs Augmentor to process the images of the batch in\n", - "parallel using multi-threading. For this to work properly you\n", - "will need to set the environment variable `JULIA_NUM_THREADS`\n", - "to the number of threads you wish to use. You can check how\n", - "many threads are used with the function `Threads.nthreads()`" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Threads.nthreads() = 12\n" - ] - } - ], - "source": [ - "@show Threads.nthreads();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that all pieces are in place, let us train our network\n", - "once more. We will use the same parameters except that now\n", - "instead of the original training images we will be using\n", - "randomly augmented images. This will cause every epoch to be\n", - "different." 
- ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "epoch 50: train accuracy 0.650, test accuracy 0.574\n", - "epoch 100: train accuracy 0.812, test accuracy 0.729\n", - "epoch 150: train accuracy 0.852, test accuracy 0.772\n", - "epoch 200: train accuracy 0.868, test accuracy 0.794\n", - "epoch 250: train accuracy 0.878, test accuracy 0.814\n", - "epoch 300: train accuracy 0.898, test accuracy 0.828\n", - "epoch 350: train accuracy 0.922, test accuracy 0.833\n", - "epoch 400: train accuracy 0.932, test accuracy 0.844\n", - "epoch 450: train accuracy 0.934, test accuracy 0.853\n", - "epoch 500: train accuracy 0.940, test accuracy 0.852\n", - "epoch 550: train accuracy 0.946, test accuracy 0.864\n", - "epoch 600: train accuracy 0.954, test accuracy 0.874\n", - "epoch 650: train accuracy 0.960, test accuracy 0.872\n", - "epoch 700: train accuracy 0.962, test accuracy 0.872\n", - "epoch 750: train accuracy 0.974, test accuracy 0.884\n", - "epoch 800: train accuracy 0.978, test accuracy 0.894\n", - "epoch 850: train accuracy 0.984, test accuracy 0.896\n", - "epoch 900: train accuracy 0.978, test accuracy 0.902\n", - "epoch 950: train accuracy 0.984, test accuracy 0.902\n", - "epoch 1000: train accuracy 0.988, test accuracy 0.909\n", - "124.314467 seconds (120.96 M allocations: 127.304 GiB, 8.08% gc time)\n" - ] - } - ], - "source": [ - "train_augmented(epochs=1) # warm-up\n", - "augmented_log = @time train_augmented(epochs=1000);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, our network reaches far better results on our\n", - "testset than our baseline network did. However, we can also\n", - "see that the training took quite a bit longer than before.\n", - "This difference generally decreases as the complexity of the\n", - "utilized neural network increases. Yet another way to improve\n", - "performance (aside from simplifying the augmentation pipeline)\n", - "would be to increase the number of available threads.\n", - "\n", - "## Visualizing the Results\n", - "\n", - "Before we end this tutorial, let us make use the\n", - "[Plots.jl](https://github.com/JuliaPlots/Plots.jl) package to\n", - "visualize and discuss the recorded training curves.\n", - "We will plot the accuracy curves of both networks side by side\n", - "in order to get a good feeling about their differences." 
- ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: No working GUI backend found for matplotlib\n" - ] - }, - { - "data": { - "text/plain": [ - "Plots.PyPlotBackend()" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "using Plots\n", - "pyplot()" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "plt = plot(\n", - " plot(baseline_log, title=\"Accuracy (baseline)\", ylim=(.5,1)),\n", - " plot(augmented_log, title=\"Accuracy (augmented)\", ylim=(.5,1)),\n", - " size = (900, 400),\n", - " markersize = 1\n", - ")\n", - "plt" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note how the accuracy on the (unaltered) training set\n", - "increases much faster for the baseline network than for the\n", - "augmented one. This is to be expected, since our augmented\n", - "network doesn't actually use the unaltered images for\n", - "training, and thus has not actually seen them. Given this\n", - "information, it is worth pointing out explicitly how the\n", - "accuracy on training set is still greater than on the test set\n", - "for the augmented network as well. This is also not a\n", - "surprise, given that the augmented images are likely more\n", - "similar to their original ones than to the test images.\n", - "\n", - "For the baseline network, the accuracy on the test set\n", - "plateaus quite quickly (around 85%). For the augmented network\n", - "on the other hand, it the accuracy keeps increasing for quite\n", - "a while longer. If you let the network train long enough you\n", - "can achieve around 97% even before it stops learning.\n", - "\n", - "## References\n", - "\n", - "**MNIST1998**: LeCun, Yan, Corinna Cortes, Christopher J.C. Burges. [\"The MNIST database of handwritten digits\"](http://yann.lecun.com/exdb/mnist/) Website. 1998." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Julia 0.6.1-pre", - "language": "julia", - "name": "julia-0.6" - }, - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "0.6.1" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/generated/mnist_tensorflow.md.old b/generated/mnist_tensorflow.md.old deleted file mode 100644 index b277556c..00000000 --- a/generated/mnist_tensorflow.md.old +++ /dev/null @@ -1,493 +0,0 @@ -# MNIST: TensorFlow CNN - -In this tutorial we will adapt the -[MNIST example](https://github.com/malmaud/TensorFlow.jl/blob/master/examples/mnist_full.jl) -from [TensorFlow.jl](https://github.com/malmaud/TensorFlow.jl) -to utilize a custom augmentation pipeline. -In order to showcase the effect that image augmentation can -have on a neural network's ability to generalize, we will -limit the training set to just the first 500 images (of the -available 60,000!). For more information on the dataset see -[^MNIST1998]. - -!!! note - - This tutorial is also available as a - [Juypter](https://jupyter.org/) notebook. You can - find a link to the Juypter version of this tutorial - in the top right corner of this page. - -## Preparing the MNIST dataset - -In order to access, prepare, and visualize the MNIST images we -employ the help of three additional Julia packages. 
In the -interest of time and space we will not go into great detail -about their functionality. Feel free to click on their -respective names to find out more information about the -utility they can provide. - -- [MLDatasets.jl](https://github.com/JuliaML/MLDatasets.jl) - has an MNIST submodule that offers a convenience interface - to read the MNIST database. - -- [Images.jl](https://github.com/JuliaImages/Images.jl) will - provide us with the necessary tools to process and display - the image data in Julia / Juypter. - -- [MLDataUtils.jl](https://github.com/JuliaML/MLDataUtils.jl) - implements a variety of functions to convert and partition - Machine Learning datasets. This will help us prepare the - MNIST data to be used with TensorFlow. - - -```@example mnist_tensorflow -using Images, MLDatasets, MLDataUtils -srand(42); -nothing # hide -``` - -As you may have seen previously in the -[elastic distortions tutorial](@ref elastic), the function -`MNIST.traintensor` returns the MNIST training images -corresponding to the given indices as a multi-dimensional -array. These images are stored in the native horizontal-major -memory layout as a single array of `Float64`. All the -individual values are scaled to be between `0.0` and `1.0`. -Also note, how the observations are laid out along the last -array dimension - - -```@example mnist_tensorflow -@show summary(MNIST.traintensor(1:500)); -nothing # hide -``` - -The corresponding label of each image is stored as an integer -value between `0` and `9`. That means that if the label has -the value `3`, then the corresponding image is known to be a -handwritten "3". To show a more concrete example, the -following code reveals that the first training image denotes a -"5" and the second training image a "0" (etc). - - -```@example mnist_tensorflow -@show summary(MNIST.trainlabels(1:500)) -println("First eight labels: ", join(MNIST.trainlabels(1:8),", ")) -``` - -For TensorFlow we will require a slightly different dimension -layout for the images. More specifically, we will move the -observations into the first array dimension. The labels will -be transformed into a one-of-k matrix. For performance reasons, -we will further convert all the numerical values to be of type -`Float32`. We will do all this by creating a little utility -function that we will name `prepare_mnist`. - - -```@example mnist_tensorflow -""" - prepare_mnist(tensor, labels) -> (X, Y) - -Change the dimension layout x1×x2×N of the given array -`tensor` to N×x1×x2 and store the result in `X`. -The given vector `labels` is transformed into a 10×N -one-hot matrix `Y`. Both, `X` and `Y`, will have the -element type `Float32`. -""" -function prepare_mnist(tensor, labels) - features = convert(Array{Float32}, permutedims(tensor, (3,1,2))) - targets = convertlabel(LabelEnc.OneOfK{Float32}, labels, 0:9, ObsDim.First()) - features, targets -end -nothing # hide -``` - -With `prepare_mnist` defined, we can now use it in conjunction -with the functions in the `MLDatasets.MNIST` sub-module to load -and prepare our training set. Recall that for this tutorial only -use the first 500 images of the training set will be used. - - -```@example mnist_tensorflow -train_x, train_y = prepare_mnist(MNIST.traintensor(1:500), MNIST.trainlabels(1:500)) -@show summary(train_x) summary(train_y); -[MNIST.convert2image(train_x[i,:,:]) for i in 1:8] -tmp = hcat(ans...) 
# hide -save("mnist_tf_train.png",repeat(tmp, inner=(4,4))) # hide -nothing # hide -``` - -![training images](mnist_tf_train.png) - -Similarly, we use `MNIST.testtensor` and `MNIST.testlabels` -to load the full MNIST test set. We will utilize that data to -measure how well the network is able to generalize with and -without augmentation. - - -```@example mnist_tensorflow -test_x, test_y = prepare_mnist(MNIST.testtensor(), MNIST.testlabels()) -@show summary(test_x) summary(test_y); -[MNIST.convert2image(test_x[i,:,:]) for i in 1:8] -tmp = hcat(ans...) # hide -save("mnist_tf_test.png",repeat(tmp, inner=(4,4))) # hide -nothing # hide -``` - -![test images](mnist_tf_test.png) - -## Defining the Network - -With the dataset prepared, we can now instantiate our neural -network. To keep things simple, we will use the same -convolutional network as defined in the -[MNIST example](https://github.com/malmaud/TensorFlow.jl/blob/master/examples/mnist_full.jl) -of Julia's TensorFlow package. - - -```@example mnist_tensorflow -using TensorFlow, Distributions -session = Session(Graph()); -nothing # hide -``` - -```@example mnist_tensorflow -function weight_variable(shape...) - initial = map(Float32, rand(Normal(0, .001), shape...)) - return Variable(initial) -end - -function bias_variable(shape...) - initial = fill(Float32(.1), shape...) - return Variable(initial) -end - -function conv2d(x, W) - nn.conv2d(x, W, [1, 1, 1, 1], "SAME") -end - -function max_pool_2x2(x) - nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME") -end -nothing # hide -``` - -```@example mnist_tensorflow -@tf begin - x = placeholder(Float32) - y = placeholder(Float32) - - W_conv1 = weight_variable(5, 5, 1, 32) - b_conv1 = bias_variable(32) - - x_image = reshape(x, [-1, 28, 28, 1]) - - h_conv1 = nn.relu(conv2d(x_image, W_conv1) + b_conv1) - h_pool1 = max_pool_2x2(h_conv1) - - W_conv2 = weight_variable(5, 5, 32, 64) - b_conv2 = bias_variable(64) - - h_conv2 = nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) - h_pool2 = max_pool_2x2(h_conv2) - - W_fc1 = weight_variable(7*7*64, 1024) - b_fc1 = bias_variable(1024) - - h_pool2_flat = reshape(h_pool2, [-1, 7*7*64]) - h_fc1 = nn.relu(h_pool2_flat * W_fc1 + b_fc1) - - keep_prob = placeholder(Float32) - h_fc1_drop = nn.dropout(h_fc1, keep_prob) - - W_fc2 = weight_variable(1024, 10) - b_fc2 = bias_variable(10) - - y_conv = nn.softmax(h_fc1_drop * W_fc2 + b_fc2) - - global cross_entropy = reduce_mean(-reduce_sum(y.*log(y_conv+1e-8), axis=[2])) - global optimizer = train.minimize(train.AdamOptimizer(1e-4), cross_entropy) - - correct_prediction = broadcast(==, indmax(y_conv, 2), indmax(y, 2)) - global accuracy = reduce_mean(cast(correct_prediction, Float32)) -end -nothing # hide -``` - -## Training without Augmentation - -In order to get an intuition for how useful augmentation can -be, we need a sensible baseline to compare to. To that end, we -will first train the network we just defined using only the -(unaltered) 500 training examples. - -The package -[ValueHistories.jl](https://github.com/JuliaML/ValueHistories.jl) -will help us record the accuracy during the training process. -We will use those logs later to visualize the differences -between having augmentation or no augmentation. - - -```@example mnist_tensorflow -using ValueHistories -``` - -To keep things simple, we will not overly optimize our -training function. Thus, we will be content with using a -closure. 
Because both, the baseline and the augmented version, -will share this "inefficiency", we should still get a decent -enough picture of their performance differences. - - -```@example mnist_tensorflow -function train_baseline(; epochs=500, batchsize=100, reset=true) - reset && run(session, global_variables_initializer()) - log = MVHistory() - for epoch in 1:epochs - for (batch_x, batch_y) in eachbatch(shuffleobs((train_x, train_y), obsdim=1), size=batchsize, obsdim=1) - run(session, optimizer, Dict(x=>batch_x, y=>batch_y, keep_prob=>0.5)) - end - - if (epoch % 50) == 0 - train = run(session, accuracy, Dict(x=>train_x, y=>train_y, keep_prob=>1.0)) - test = run(session, accuracy, Dict(x=>test_x, y=>test_y, keep_prob=>1.0)) - @trace log epoch train test - msg = "epoch " * lpad(epoch,4) * ": train accuracy " * rpad(round(train,3),5,"0") * ", test accuracy " * rpad(round(test,3),5,"0") - println(msg) - end - end - log -end -nothing # hide -``` - -Aside from the accuracy, we will also keep an eye on the -training time. In particular we would like to see if and how -the addition of augmentation causes our training time to -increase. - - -```@example mnist_tensorflow -train_baseline(epochs=1) # warm-up -baseline_log = @time train_baseline(epochs=1000); -nothing # hide -``` - -As we can see, the accuracy on the training set is around a -100%, while the accuracy on the test set peaks around 85%. For -a mere 500 training examples, this isn't actually that bad of -a result. - -## Integrating Augmentor - -Now that we have a network architecture with a baseline to -compare to, let us finally see what it takes to add Augmentor -to our experiment. First, we need to include the package to -our experiment. - - -```@example mnist_tensorflow -using Augmentor -``` - -The next step, and maybe the most human-hour consuming part of -adding image augmentation to a prediction problem, is to -design and select a sensible augmentation pipeline. Take a -look at the [elastic distortions tutorial](@ref elastic) for -an example of how to do just that. - -For this example, we already choose a quite complicated but -promising augmentation pipeline for you. This pipeline was -designed to yield a large variation of effects as well as to -showcase how even deep pipelines are quite efficient in terms -of performance. - - -```@example mnist_tensorflow -pl = PermuteDims(2,1) |> - ShearX(-5:5) * ShearY(-5:5) |> - Rotate(-15:15) |> - CropSize(28,28) |> - Zoom(0.9:0.1:1.2) |> - CacheImage() |> - ElasticDistortion(10) |> - PermuteDims(2,1) -``` - -Most of the used operations are quite self explanatory, but -there are some details about this pipeline worth pointing out -explicitly. - -1. We use the operation [`PermuteDims`](@ref) to convert the - horizontal-major MNIST image to a julia-native - vertical-major image. The vertical-major image is then - processed and converted back to a horizontal-major array. - We mainly do this here to showcase the option, but it is - also to keep consistent with how the data is usually used - in the literature. Alternatively, one could just work with - the MNIST data in a vertical-major format all the way - through without any issue. - -2. As counter-intuitive as it sounds, the operation - [`CacheImage`](@ref) right before - [`ElasticDistortion`](@ref) is actually used to improve - performance. If we were to omit it, then the whole pipeline - would be applied in one single pass. 
In this case, applying - distortions on top of affine transformations lazily is in - fact less efficient than using a temporary variable. - -With the pipeline now defined, let us quickly peek at what -kind of effects we can achieve with it. In particular, lets -apply the pipeline multiple times to the first training image -and look at what kind of results it produces. - - -```@example mnist_tensorflow -[MNIST.convert2image(augment(train_x[1,:,:], pl)) for i in 1:8, j in 1:2] -tmp = vcat(hcat(ans[:,1]...), hcat(ans[:,2]...)) # hide -save("mnist_tf_aug.png",repeat(tmp, inner=(4,4))) # hide -nothing # hide -``` - -![augmented samples](mnist_tf_aug.png) - -As we can see, we can achieve a wide range of effects, from -more subtle to more pronounced. The important part is that all -examples are still clearly representative of the true label. - -Next, we have to adapt the function `train_baseline` to make -use of our augmentation pipeline. To integrate Augmentor -efficiently, there are three necessary changes we have to -make. - -1. Preallocate a buffer with the same size and element type - that each batch has. - - ``` - augmented_x = zeros(Float32, batchsize, 28, 28) - ``` - -2. Add a call to [`augmentbatch!`](@ref) in the inner loop of - the batch iterator using our pipeline and buffer. - - ``` - augmentbatch!(augmented_x, batch_x, pl, ObsDim.First()) - ``` - -3. Replace `x=>batch_x` with `x=>augmented_x` in the call to - TensorFlow's `run(session, ...)`. - -Applying these changes to our `train_baseline` function -will give us something similar to the following function. -Note how all the other parts of the function remain exactly -the same as before. - - -```@example mnist_tensorflow -function train_augmented(; epochs=500, batchsize=100, reset=true) - reset && run(session, global_variables_initializer()) - log = MVHistory() - augm_x = zeros(Float32, batchsize, size(train_x,2), size(train_x,3)) - for epoch in 1:epochs - for (batch_x, batch_y) in eachbatch(shuffleobs((train_x, train_y), obsdim=1), size=batchsize, obsdim=1) - augmentbatch!(CPUThreads(), augm_x, batch_x, pl, ObsDim.First()) - run(session, optimizer, Dict(x=>augm_x, y=>batch_y, keep_prob=>0.5)) - end - - if (epoch % 50) == 0 - train = run(session, accuracy, Dict(x=>train_x, y=>train_y, keep_prob=>1.0)) - test = run(session, accuracy, Dict(x=>test_x, y=>test_y, keep_prob=>1.0)) - @trace log epoch train test - msg = "epoch " * lpad(epoch,4) * ": train accuracy " * rpad(round(train,3),5,"0") * ", test accuracy " * rpad(round(test,3),5,"0") - println(msg) - end - end - log -end -nothing # hide -``` - -You may have noticed in the code above that we also pass a -`CPUThreads()` as the first argument to [`augmentbatch!`](@ref). -This instructs Augmentor to process the images of the batch in -parallel using multi-threading. For this to work properly you -will need to set the environment variable `JULIA_NUM_THREADS` -to the number of threads you wish to use. You can check how -many threads are used with the function `Threads.nthreads()` - - -```@example mnist_tensorflow -@show Threads.nthreads(); -nothing # hide -``` - -Now that all pieces are in place, let us train our network -once more. We will use the same parameters except that now -instead of the original training images we will be using -randomly augmented images. This will cause every epoch to be -different. 
- - -```@example mnist_tensorflow -train_augmented(epochs=1) # warm-up -augmented_log = @time train_augmented(epochs=1000); -nothing # hide -``` - -As we can see, our network reaches far better results on our -testset than our baseline network did. However, we can also -see that the training took quite a bit longer than before. -This difference generally decreases as the complexity of the -utilized neural network increases. Yet another way to improve -performance (aside from simplifying the augmentation pipeline) -would be to increase the number of available threads. - -## Visualizing the Results - -Before we end this tutorial, let us make use the -[Plots.jl](https://github.com/JuliaPlots/Plots.jl) package to -visualize and discuss the recorded training curves. -We will plot the accuracy curves of both networks side by side -in order to get a good feeling about their differences. - - -```@example mnist_tensorflow -using Plots -pyplot() -nothing # hide -``` - -```@example mnist_tensorflow -default(bg_outside=colorant"#FFFFFF") # hide -plt = plot( - plot(baseline_log, title="Accuracy (baseline)", ylim=(.5,1)), - plot(augmented_log, title="Accuracy (augmented)", ylim=(.5,1)), - size = (900, 400), - markersize = 1 -) -png(plt, "mnist_tf_curves.png") # hide -nothing # hide -``` -![learning curves](mnist_tf_curves.png) - -Note how the accuracy on the (unaltered) training set -increases much faster for the baseline network than for the -augmented one. This is to be expected, since our augmented -network doesn't actually use the unaltered images for -training, and thus has not actually seen them. Given this -information, it is worth pointing out explicitly how the -accuracy on training set is still greater than on the test set -for the augmented network as well. This is also not a -surprise, given that the augmented images are likely more -similar to their original ones than to the test images. - -For the baseline network, the accuracy on the test set -plateaus quite quickly (around 85%). For the augmented network -on the other hand, it the accuracy keeps increasing for quite -a while longer. If you let the network train long enough you -can achieve around 97% even before it stops learning. - -## References - -[^MNIST1998]: LeCun, Yan, Corinna Cortes, Christopher J.C. Burges. ["The MNIST database of handwritten digits"](http://yann.lecun.com/exdb/mnist/) Website. 1998. - diff --git a/gettingstarted/index.html b/gettingstarted/index.html deleted file mode 100644 index 09f150cc..00000000 --- a/gettingstarted/index.html +++ /dev/null @@ -1,24 +0,0 @@ - -Getting Started · Augmentor.jl

    Getting Started

    Getting Started

    In this section we will provide a condensed overview of the package. In order to keep this overview concise, we will not discuss any background information or theory here in detail.

    Installation

    To install Augmentor.jl, start up Julia and type the following code snippet into the REPL. It makes use of the native Julia package manager.

    Pkg.add("Augmentor")

    Additionally, if you encounter any sudden issues, or in case you would like to contribute to the package, you can manually choose to be on the latest (untagged) development version.

    Pkg.checkout("Augmentor")
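    Note that the two commands above use the package-manager API of Julia 0.6, which this documentation targets. On Julia 1.0 and newer the package manager lives in the Pkg standard library (or the interactive ] mode of the REPL); a minimal sketch under that assumption:

    using Pkg
    Pkg.add("Augmentor")      # install the registered release
    Pkg.develop("Augmentor")  # or track the development version instead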

    Example

    The following code snippet shows how a stochastic augmentation pipeline can be specified using simple building blocks that we call "operations". In order to give the example some meaning, we will use a real medical image from the publicly available ISIC archive as input. The concrete image can be downloaded here using their Web API.

    julia> using Augmentor, ISICArchive
    -
    -julia> img = get(ImageThumbnailRequest(id = "5592ac599fc3c13155a57a85"))
    -169×256 Array{RGB{N0f8},2}:
    -[...]
    -
    -julia> pl = Either(1=>FlipX(), 1=>FlipY(), 2=>NoOp()) |>
    -            Rotate(0:360) |>
    -            ShearX(-5:5) * ShearY(-5:5) |>
    -            CropSize(165, 165) |>
    -            Zoom(1:0.05:1.2) |>
    -            Resize(64, 64)
    -6-step Augmentor.ImmutablePipeline:
    - 1.) Either: (25%) Flip the X axis. (25%) Flip the Y axis. (50%) No operation.
    - 2.) Rotate by θ ∈ 0:360 degree
    - 3.) Either: (50%) ShearX by ϕ ∈ -5:5 degree. (50%) ShearY by ψ ∈ -5:5 degree.
    - 4.) Crop a 165×165 window around the center
    - 5.) Zoom by I ∈ {1.0×1.0, 1.05×1.05, 1.1×1.1, 1.15×1.15, 1.2×1.2}
    - 6.) Resize to 64×64
    -
    -julia> img_new = augment(img, pl)
    -64×64 Array{RGB{N0f8},2}:
    -[...]

    The function augment will generate a single augmented image from the given input image and pipeline. To visualize the effect we compiled a few resulting output images into a GIF using the plotting library Plots.jl with the PyPlot.jl back-end. You can inspect the full code by clicking on "Edit on Github" in the top right corner of this page.

    Input (img) → Output (img_new) (image comparison)

    Getting Help

    To get help on specific functionality you can either look up the information here, or if you prefer you can make use of Julia's native doc-system. The following example shows how to get additional information on augment within Julia's REPL:

    ?augment

    If you find yourself stuck or have other questions concerning the package, you can find us on Gitter or in the Machine Learning domain on discourse.julialang.org.

    If you encounter a bug or would like to participate in the development of this package, come find us on GitHub.

    diff --git a/images/index.html b/images/index.html deleted file mode 100644 index 9ee207b5..00000000 --- a/images/index.html +++ /dev/null @@ -1,80 +0,0 @@ - -Working with Images in Julia · Augmentor.jl

    Working with Images in Julia

    Working with Images in Julia

    The Julia language provides a rich syntax as well as a large set of highly-optimized functionality for working with (multi-dimensional) arrays of what is known as "bit types", or compositions of such. Because of this, the language lends itself particularly well to the fairly simple idea of treating images as just plain arrays. Even though this may sound like a rather tedious low-level approach, Julia makes it possible to still allow for powerful abstraction layers without the loss of generality that usually comes with that. This is accomplished with the help of Julia's flexible type system and multiple dispatch (both of which are beyond the scope of this tutorial).

    While the images-are-arrays approach makes working with images in Julia very performant, it has also been a source of confusion for new community members. This beginner's guide is an attempt to provide a step-by-step overview of how pixel data is handled in Julia. To get a more detailed explanation on some particular concept involved, please take a look at the documentation of the JuliaImages ecosystem.

    Multi-dimensional Arrays

    To wrap our heads around Julia's array-based treatment of images, we first need to understand what Julia arrays are and how we can work with them.

    Note

    This section is only intended to provide a simplified and thus partial overview of Julia's array capabilities in order to gain some intuition about pixel data. For a more detailed treatment of the topic please have a look at the official documentation.

    Whenever we work with an Array in which the elements are bit-types (e.g. Int64, Float32, UInt8, etc), we can think of the array as a continuous block of memory. This is useful for many different reasons, such as cache locality and interacting with external libraries.

    The same block of memory can be interpreted in a number of ways. Consider the following example in which we allocate a vector (i.e. a one dimensional array) of UInt8 (i.e. bytes) with some ordered example values ranging from 1 to 6. We will think of this as our physical memory block, since it is a pretty close representation.

    julia> memory = [0x1, 0x2, 0x3, 0x4, 0x5, 0x6]
    -6-element Array{UInt8,1}:
    - 0x01
    - 0x02
    - 0x03
    - 0x04
    - 0x05
    - 0x06

    The same block of memory could also be interpreted differently. For example, we could think of it as a matrix with 3 rows and 2 columns instead (or even the other way around). The function reinterpret allows us to do just that:

    julia> A = reinterpret(UInt8, memory, (3,2))
    -3×2 Array{UInt8,2}:
    - 0x01  0x04
    - 0x02  0x05
    - 0x03  0x06

    Note how we specified the number of rows first. This is because the Julia language follows the column-major convention for multi-dimensional arrays. What this means can be observed when we compare our new matrix A with the initial vector memory and look at the element layout. Both variables are using the same underlying memory (i.e. the value 0x01 is physically stored right next to the value 0x02 in our example, while 0x01 and 0x04 are quite far apart, even though the matrix interpretation makes it look like they are neighbors; which they are not).

    Tip

    A quick and dirty way to check if two variables are representing the same block of memory is by comparing the output of pointer(myvariable). Note, however, that technically this only tells you where a variable starts in memory and thus has its limitations.
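    For instance, with the variables from the example above, such a check might look like this (a small sketch, not part of the original text):

    # do A and memory start at the same memory address?
    pointer(A) == pointer(memory)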

    This idea can also be generalized for higher dimensions. For example we can think of this as a 3D array as well.

    julia> reinterpret(UInt8, memory, (3,1,2))
    -3×1×2 Array{UInt8,3}:
    -[:, :, 1] =
    - 0x01
    - 0x02
    - 0x03
    -
    -[:, :, 2] =
    - 0x04
    - 0x05
    - 0x06

    If you take a closer look at the dimension sizes, you can see that all we did in that example was add a new dimension of size 1, while not changing the other numbers. In fact we can add any number of practically empty dimensions, otherwise known as singleton dimensions.

    julia> reinterpret(UInt8, memory, (3,1,1,1,2))
    -3×1×1×1×2 Array{UInt8,5}:
    -[:, :, 1, 1, 1] =
    - 0x01
    - 0x02
    - 0x03
    -
    -[:, :, 1, 1, 2] =
    - 0x04
    - 0x05
    - 0x06

    This is a useful property to have when we are confronted with greyscale datasets that do not have a color channel, yet we still want to work with a library that expects the images to have one.
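    As a quick illustration (a sketch, not part of the original text), a batch of grayscale images that lacks a color channel can be given an explicit singleton channel dimension using reshape, which, much like reinterpret, does not copy any data:

    # 100 hypothetical grayscale images of size 28×28, no channel dimension
    batch = rand(Float32, 28, 28, 100)
    # add a singleton channel dimension without copying the data
    batch_with_channel = reshape(batch, 28, 28, 1, 100)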

    Vertical-Major vs Horizontal-Major

    There are a number of different conventions for how to store image data into a binary format. The first question one has to address is the order in which the image dimensions are transcribed.

    We have seen before that Julia follows the column-major convention for its arrays, which for images would lead to the corresponding convention of being vertical-major. In the image domain, however, it is fairly common to store the pixels in a horizontal-major layout. In other words, horizontal-major means that images are stored in memory (or file) one pixel row after the other.

    In most cases, when working within the JuliaImages ecosystem, the images should already be in the Julia-native column major layout. If for some reason that is not the case there are two possible ways to convert the image to that format.

    julia> At = reinterpret(UInt8, memory, (3,2))' # "row-major" layout
    -2×3 Array{UInt8,2}:
    - 0x01  0x02  0x03
    - 0x04  0x05  0x06
    1. The first way to alter the pixel order is by using the function Base.permutedims. In contrast to what we have seen before, this function will allocate a new array and copy the values in the appropriate manner.

      julia> B = permutedims(At, (2,1))
      -3×2 Array{UInt8,2}:
      - 0x01  0x04
      - 0x02  0x05
      - 0x03  0x06
    2. The second way is using the function ImageCore.permuteddimsview which results in a lazy view that does not allocate a new array but instead only computes the correct values when queried.

      julia> using ImageCore
      -
      -julia> C = permuteddimsview(At, (2,1))
      -3×2 PermutedDimsArray(::Array{UInt8,2}, (2, 1)) with element type UInt8:
      - 0x01  0x04
      - 0x02  0x05
      - 0x03  0x06

    Either way, it is in general a good idea to make sure that the array one is working with ends up in a column-major layout.

    Reinterpreting Elements

    Up to this point, all we talked about was how to reinterpret or permute the dimensional layout of some continuous memory block. If you look at the examples above you will see that all the arrays have elements of type UInt8, which just means that each element is represented by a single byte in memory.

    Knowing all this, we can now take the idea a step further and think about reinterpreting the element types of the array. Let us consider our original vector memory again.

    julia> memory = [0x1, 0x2, 0x3, 0x4, 0x5, 0x6]
    -6-element Array{UInt8,1}:
    - 0x01
    - 0x02
    - 0x03
    - 0x04
    - 0x05
    - 0x06

    Note how each byte is thought of as an individual element. One thing we could do instead is to think of this memory block as a vector of three UInt16 elements.

    julia> reinterpret(UInt16, memory)
    -3-element Array{UInt16,1}:
    - 0x0201
    - 0x0403
    - 0x0605

    Pay attention to where our original bytes ended up. In contrast to just rearranging elements as we did before, we ended up with significantly different element values. One may ask why it would ever be practical to reinterpret a memory block like this. The one word answer to this is Colors! As we will see in the remainder of this tutorial, it turns out to be a very useful thing to do when your arrays represent pixel data.

    Introduction to Color Models

    As we discussed before, there are various conventions for how to store pixel data in a binary format. That is not only true for dimension priority, but also for color information.

    One way color information can differ is in the color model in which it is described. Two famous examples of color models are RGB and HSV. They essentially define how colors are conceptually made up in terms of some components. Additionally, one can decide on how many bits to use to describe each color component. By doing so one defines the available color depth.

    Before we look into using the actual implementation of Julia's color models, let us prototype our own imperfect toy model in order to get a better understanding of what is happening under the hood.

    # define our toy color model
    -struct MyRGB
    -    r::UInt8
    -    g::UInt8
    -    b::UInt8
    -end

    Note how we defined our new toy color model as a struct. Because of this, and the fact that all its components are bit types (in this case UInt8), any instantiation of our new type will be represented as a continuous block of memory as well.
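    One quick way to convince ourselves of this (a small sketch, not part of the original text):

    sizeof(MyRGB)   # 3 bytes, i.e. one byte per color component
    isbits(MyRGB)   # true on Julia 0.6 (use isbitstype(MyRGB) on Julia ≥ 0.7)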

    We can now apply our color model to our memory vector from above, and interpret the underlying memory as a vector of MyRGB values instead.

    julia> reinterpret(MyRGB, memory)
    -2-element Array{MyRGB,1}:
    - MyRGB(0x01,0x02,0x03)
    - MyRGB(0x04,0x05,0x06)

    Similar to the UInt16 example, we now group neighboring bytes into larger units (namely MyRGB). In contrast to the UInt16 example, we are still able to access the individual components underneath. This simple toy color model already allows us to do a lot of useful things. We could define functions that work on MyRGB values in a color-space appropriate fashion. We could also define other color models and implement functions to convert between them.
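    For instance (a hypothetical sketch building on the toy type above, not taken from ColorTypes.jl), a crude intensity measure for MyRGB values could be written as:

    # average the three components in floating point (illustrative only)
    intensity(c::MyRGB) = (Int(c.r) + Int(c.g) + Int(c.b)) / (3 * 255)

    intensity(MyRGB(0x01, 0x02, 0x03))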

    However, our little toy color model is not yet optimal. For example, it hard-codes a predefined color depth of 24 bits. We may have use-cases where we need a richer color space. One thing we could do to achieve that would be to introduce a new type in a similar fashion. Still, because such types have a different range of available numbers per channel (because they have a different number of bits per channel), we would have to write a lot of specialized code to be able to appropriately handle all color models and depths.

    Luckily, the creators of ColorTypes.jl went with a more generic strategy: using parameterized types and fixed point numbers.

    Tip

    If you are interested in how various color models are actually designed and/or implemented in Julia, you can take a look at the ColorTypes.jl package.

    Fixed Point Numbers

    The idea behind using fixed point numbers for each color component is fairly simple. No matter how many bits a component is made up of, we always want the largest possible value of the component to be equal to 1.0 and the smallest possible value to be equal to 0. Of course, the amount of possible intermediate numbers still depends on the number of underlying bits in the memory, but that is not much of an issue.

    julia> using FixedPointNumbers;
    -
    -julia> reinterpret(N0f8, 0xFF)
    -1.0N0f8
    -
    -julia> reinterpret(N0f16, 0xFFFF)
    -1.0N0f16

    Not only does this allow for simple conversion between different color depths, it also allows us to implement generic algorithms that are completely agnostic to the utilized color depth.
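    As a small example of such a generic algorithm (a sketch, not part of the original text), the following brightness measure works unchanged for RGB{N0f8}, RGB{N0f16}, or RGB{Float32} pixels, precisely because every component is normalized to the interval [0, 1]:

    using Colors

    # color-depth agnostic brightness: simply the mean of the three channels
    brightness(c::AbstractRGB) = (float(red(c)) + float(green(c)) + float(blue(c))) / 3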

    It is worth pointing out again, that we get all these goodies without actually changing or copying the original memory block. Remember how during this whole tutorial we have only changed the interpretation of some underlying memory, and have not had the need to copy any data so far.

    Tip

    For pixel data we are mainly interested in unsigned fixed point numbers, but there are others too. Check out the package FixedPointNumbers.jl for more information on fixed point numbers in general.

    Let us now leave our toy model behind and use the actual implementation of RGB on our example vector memory. With the first command we will interpret our data as two pixels with 8 bits per color channel, and with the second command as a single pixel with 16 bits per color channel.

    julia> using Colors, FixedPointNumbers;
    -
    -julia> reinterpret(RGB{N0f8}, memory)
    -2-element Array{RGB{N0f8},1}:
    - RGB{N0f8}(0.004,0.008,0.012)
    - RGB{N0f8}(0.016,0.02,0.024)
    -
    -julia> reinterpret(RGB{N0f16}, memory)
    -1-element Array{RGB{N0f16},1}:
    - RGB{N0f16}(0.00783,0.01567,0.02351)

    Note how the component values are now displayed as numbers between 0 and 1. They are not floating point numbers, but fixed point numbers backed by the very same bytes as before.

    diff --git a/index.html b/index.html index 9a12b804..0e47208d 100644 --- a/index.html +++ b/index.html @@ -1,14 +1,6 @@ -Home · Augmentor.jl

    Home


    A fast library for increasing the number of training images by applying various transformations.

    Augmentor.jl's documentation

    Augmentor is a real-time image augmentation library designed to render the process of artificial dataset enlargement more convenient, less error prone, and easier to reproduce. It offers the user the ability to build a stochastic image-processing pipeline (or simply augmentation pipeline) using image operations as building blocks. In other words, an augmentation pipeline is little more than a sequence of operations for which the parameters can (but need not) be random variables, as the following code snippet demonstrates.

    julia> using Augmentor
    -
    -julia> pl = ElasticDistortion(6, scale=0.3, border=true) |>
    -            Rotate([10, -5, -3, 0, 3, 5, 10]) |>
    -            ShearX(-10:10) * ShearY(-10:10) |>
    -            CropSize(28, 28) |>
    -            Zoom(0.9:0.1:1.2)
    -5-step Augmentor.ImmutablePipeline:
    - 1.) Distort using a smoothed and normalized 6×6 grid
    - 2.) Rotate by θ ∈ [10, -5, -3, 0, 3, 5, 10] degree
    - 3.) Either: (50%) ShearX by ϕ ∈ -10:10 degree. (50%) ShearY by ψ ∈ -10:10 degree.
    - 4.) Crop a 28×28 window around the center
    - 5.) Zoom by I ∈ {0.9×0.9, 1.0×1.0, 1.1×1.1, 1.2×1.2}

    Such a pipeline can then be used for sampling. Here we use the first few examples of the MNIST database.

    (24 augmented MNIST sample images)

    The Julia version of Augmentor is engineered specifically for high performance applications. It makes use of multiple heuristics to generate efficient tailor-made code for the concrete user-specified augmentation pipeline. In particular Augmentor tries to avoid the need for any intermediate images, but instead aims to compute the output image directly from the input in one single pass.

    Where to begin?

    If this is the first time you consider using Augmentor.jl for your machine learning related experiments or packages, make sure to check out the "Getting Started" section. There we list the installation instructions and some simple hello world examples.

    Augmentor.jl is the Julia package for Augmentor. You can find the Python version here.

    Introduction and Motivation

    If you are new to image augmentation in general, or are simply interested in some background information, feel free to take a look at the following sections. There we discuss the concepts involved and outline the most important terms and definitions.

    In case you have not worked with image data in Julia before, feel free to browse the following documents for a crash course on how image data is represented in the Julia language, as well as how to visualize it. For more information on image processing in Julia, take a look at the documentation for the vast JuliaImages ecosystem.

    User's Guide

    As the name suggests, Augmentor was designed with image augmentation for machine learning in mind. That said, the way the library is implemented allows it to also be used for efficient image processing outside the machine learning domain.

    The following section describes the high-level user interface in detail. In particular it focuses on how a (stochastic) image-processing pipeline can be defined and then be applied to an image (or a set of images). It also discusses how batch processing of multiple images can be performed in parallel using multi-threading.

    We mentioned before that an augmentation pipeline is just a sequence of image operations. Augmentor ships with a number of predefined operations, which should be sufficient to describe the most commonly utilized augmentation strategies. Each operation is represented as its own unique type. The following section provides a complete list of all the exported operations and their documentation.

    Tutorials

    Just like an image can say more than a thousand words, a simple hands-on tutorial showing actual code can say more than many pages of formal documentation.

    The first step of devising a successful augmentation strategy is to identify an appropriate set of operations and parameters. What that means can vary widely, because the utility of each operation depends on the dataset at hand (see label-preserving transformations for an example). To that end, we will spend the first tutorial discussing a simple but useful approach to interactively explore and visualize the space of possible parameters.

    In the next tutorials we will take a close look at how we can actually use Augmentor in combination with popular deep learning frameworks. The first framework we will discuss will be Knet. In particular we will focus on adapting an already existing example to make use of a (quite complicated) augmentation pipeline. Furthermore, this tutorial will also serve to showcase the various ways that augmentation can influence the performance of your network.

    Citing Augmentor

    If you use Augmentor for academic research and wish to cite it, please use the following paper.

    Marcus D. Bloice, Christof Stocker, and Andreas Holzinger, Augmentor: An Image Augmentation Library for Machine Learning, arXiv preprint arXiv:1708.04680, https://arxiv.org/abs/1708.04680, 2017.

    Indices

    + + + + + diff --git a/indices/index.html b/indices/index.html deleted file mode 100644 index 2642ec46..00000000 --- a/indices/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Indices · Augmentor.jl

    Indices

    Functions

    Types

    diff --git a/interface/index.html b/interface/index.html deleted file mode 100644 index c443bcb1..00000000 --- a/interface/index.html +++ /dev/null @@ -1,57 +0,0 @@ - -High-level Interface · Augmentor.jl

    High-level Interface

    High-level Interface

    Integrating Augmentor into an existing project should in general not require any major changes to your code. In most cases it should break down to the three basic steps outlined below. We will spend the rest of this document investigating these in more detail.

    1. Import Augmentor into the namespace of your program.

      using Augmentor
    2. Define a (stochastic) image processing pipeline by chaining the desired operations using |> and *.

      julia> pl = FlipX() * FlipY() |> Zoom(0.9:0.1:1.2) |> CropSize(64,64)
      -3-step Augmentor.ImmutablePipeline:
      - 1.) Either: (50%) Flip the X axis. (50%) Flip the Y axis.
      - 2.) Zoom by I ∈ {0.9×0.9, 1.0×1.0, 1.1×1.1, 1.2×1.2}
      - 3.) Crop a 64×64 window around the center
    3. Apply the pipeline to the existing image or set of images.

      img_processed = augment(img_original, pl)

    Depending on the complexity of your problem, you may want to iterate between 2. and 3. to identify an appropriate pipeline. Take a look at the Elastic Distortions Tutorial for an example of what such an iterative process could look like.

    Defining a Pipeline

    In Augmentor, a (stochastic) image-processing pipeline can be understood as a sequence of operations, for which the parameters can (but need not) be random variables. What that essentially means is that the user explicitly specifies which image operation to perform in what order. A complete list of available operations can be found at Supported Operations.

    To start off with a simple example, let us assume that we want to first rotate our image(s) counter-clockwise by 14°, then crop them down to the biggest possible square, and lastly resize the image(s) to a fixed size of 64 by 64 pixels. Such a pipeline would be defined as follows:

    julia> pl = Rotate(14) |> CropRatio(1) |> Resize(64,64)
    -3-step Augmentor.ImmutablePipeline:
    - 1.) Rotate 14 degree
    - 2.) Crop to 1:1 aspect ratio
    - 3.) Resize to 64×64

    Notice that in the example above there is no room for randomness. In other words, the same input image would always result in the same output image given that pipeline. If we wish for more variation, we can introduce it by using a vector of values as a parameter instead of a single number.

    Note

    In this subsection we will focus only on how to define a pipeline, without actually thinking too much about how to apply that pipeline to an actual image. The latter will be the main topic of the rest of this document.

    Say we wish to adapt our pipeline such that the rotation is a little more random. More specifically, let's say we want our image to be rotated by either -10°, -5°, 5°, 10°, or not at all. Other than that change we will leave the rest of the pipeline as is.

    julia> pl = Rotate([-10,-5,0,5,10]) |> CropRatio(1) |> Resize(64,64)
    -3-step Augmentor.ImmutablePipeline:
    - 1.) Rotate by θ ∈ [-10, -5, 0, 5, 10] degree
    - 2.) Crop to 1:1 aspect ratio
    - 3.) Resize to 64×64

    Variation in the parameters is only one of the two main ways to introduce randomness to our pipeline. Additionally, one can specify that an operation should be sampled randomly from a chosen set of operations. This can be accomplished using a utility operation called Either, which has its own convenience syntax.

    As an example, let us assume we wish to first either mirror our image(s) horizontally, or vertically, or not at all, and then crop them down to a size of 100 by 100 pixels around the image's center. We can specify the "either" using the * operator.

    julia> pl = FlipX() * FlipY() * NoOp() |> CropSize(100,100)
    -2-step Augmentor.ImmutablePipeline:
    - 1.) Either: (33%) Flip the X axis. (33%) Flip the Y axis. (33%) No operation.
    - 2.) Crop a 100×100 window around the center

    It is also possible to specify the odds for such an "either". For example, we may want the NoOp to be twice as likely as either of the mirroring options.

    julia> pl = (1=>FlipX()) * (1=>FlipY()) * (2=>NoOp()) |> CropSize(100,100)
    -2-step Augmentor.ImmutablePipeline:
    - 1.) Either: (25%) Flip the X axis. (25%) Flip the Y axis. (50%) No operation.
    - 2.) Crop a 100×100 window around the center

    Now that we know how to define a pipeline, let us think about how to apply it to an image or a set of images.

    Loading the Example Image

    Augmentor ships with a custom example image, which was specifically designed for visualizing augmentation effects. It can be accessed by calling the function testpattern(). That said, doing so explicitly should rarely be necessary in practice, because most high-level functions will default to using testpattern() if no other image is specified.

    Augmentor.testpatternFunction.
    testpattern() -> Matrix{RGBA{N0f8}}

    Load and return the provided 300x400 test image.

    The returned image was specifically designed to be informative about the effects of the applied augmentation operations. It is thus well suited to prototype an augmentation pipeline, because it makes it easy to see what kind of effects one can achieve with it.

    source
    using Augmentor
    -img = testpattern()

    testpattern

    Augmenting an Image

    Once a pipeline is constructed it can be applied to an image (i.e. AbstractArray{<:ColorTypes.Colorant}), or even just to an array of numbers (i.e. AbstractArray{<:Number}), using the function augment.

    Augmentor.augmentFunction.
    augment([img], pipeline) -> out

    Apply the operations of the given pipeline sequentially to the given image img and return the resulting image out.

    julia> img = testpattern();
    -
    -julia> out = augment(img, FlipX() |> FlipY())
    -3×2 Array{Gray{N0f8},2}:
    -[...]

    The parameter img can either be a single image, or a tuple of multiple images. In case img is a tuple of images, its elements will be assumed to be conceptually connected. Consequently, all images in the tuple will take the exact same path through the pipeline; even when randomness is involved. This is useful for the purpose of image segmentation, for which the input and output are both images that need to be transformed exactly the same way.

    img1 = testpattern()
    -img2 = Gray.(testpattern())
    -out1, out2 = augment((img1, img2), FlipX() |> FlipY())

    The parameter pipeline can be a Augmentor.Pipeline, a tuple of Augmentor.Operation, or a single Augmentor.Operation.

    img = testpattern()
    -augment(img, FlipX() |> FlipY())
    -augment(img, (FlipX(), FlipY()))
    -augment(img, FlipX())

    If img is omitted, Augmentor will use the augmentation test image provided by the function testpattern as the input image.

    augment(FlipX())
    source

    We also provide a mutating version of augment that writes the output into preallocated memory. While this function avoids allocation, it does have the caveat that the size of the output image must be known beforehand (and thus must not be random).

    Augmentor.augment!Function.
    augment!(out, img, pipeline) -> out

    Apply the operations of the given pipeline sequentially to the image img and write the resulting image into the preallocated parameter out. For convenience out is also the function's return-value.

    img = testpattern()
    -out = similar(img)
    -augment!(out, img, FlipX() |> FlipY())

    The parameter img can either be a single image, or a tuple of multiple images. In case img is a tuple of images, the parameter out has to be a tuple of the same length and ordering. See augment for more information.

    imgs = (testpattern(), Gray.(testpattern()))
    -outs = (similar(imgs[1]), similar(imgs[2]))
    -augment!(outs, imgs, FlipX() |> FlipY())

    The parameter pipeline can be a Augmentor.Pipeline, a tuple of Augmentor.Operation, or a single Augmentor.Operation.

    img = testpattern()
    -out = similar(img)
    -augment!(out, img, FlipX() |> FlipY())
    -augment!(out, img, (FlipX(), FlipY()))
    -augment!(out, img, FlipX())
    source

    Augmenting Image Batches

    In most machine learning scenarios we will want to process a whole batch of images at once, instead of a single image at a time. For this reason we provide the function augmentbatch!, which also supports multi-threading.

    augmentbatch!([resource], outs, imgs, pipeline, [obsdim]) -> outs

    Apply the operations of the given pipeline to the images in imgs and write the resulting images into outs.

    Both outs and imgs have to contain the same number of images. Each of these two variables can either be in the form of a higher dimensional array, or in the form of a vector of arrays, in which each vector element denotes an image.

    # create five example observations of size 3x3
    -imgs = rand(3,3,5)
    -# create output arrays of appropriate shape
    -outs = similar(imgs)
    -# transform the batch of images
    -augmentbatch!(outs, imgs, FlipX() |> FlipY())

    If one (or both) of the two parameters outs and imgs is a higher dimensional array, then the optional parameter obsdim can be used to specify which dimension denotes the observations (defaults to ObsDim.Last()).

    # create five example observations of size 3x3
    -imgs = rand(5,3,3)
    -# create output arrays of appropriate shape
    -outs = similar(imgs)
    -# transform the batch of images
    -augmentbatch!(outs, imgs, FlipX() |> FlipY(), ObsDim.First())

    Similar to augment!, it is also allowed for outs and imgs to both be tuples of the same length. If that is the case, then each tuple element can be in any of the forms listed above. This is useful for tasks such as image segmentation, where each observation is made up of more than one image.

    # create five example observations where each observation is
    -# made up of two conceptually linked 3x3 arrays
    -imgs = (rand(3,3,5), rand(3,3,5))
    -# create output arrays of appropriate shape
    -outs = similar.(imgs)
    -# transform the batch of images
    -augmentbatch!(outs, imgs, FlipX() |> FlipY())

    The parameter pipeline can be a Augmentor.Pipeline, a tuple of Augmentor.Operation, or a single Augmentor.Operation.

    augmentbatch!(outs, imgs, FlipX() |> FlipY())
    -augmentbatch!(outs, imgs, (FlipX(), FlipY()))
    -augmentbatch!(outs, imgs, FlipX())

    The optional first parameter resource can either be CPU1() (default) or CPUThreads(). In the latter case the images will be augmented in parallel. For this to make sense, make sure that the environment variable JULIA_NUM_THREADS is set to a reasonable number, so that Threads.nthreads() is greater than 1.
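    For instance (a sketch, not part of the original docs), one way to verify the thread setup before augmenting in parallel:

    # Julia must be started with the environment variable already set,
    # e.g. JULIA_NUM_THREADS=4 julia
    Threads.nthreads()   # should then report 4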

    # transform the batch of images in parallel using multithreading
    -augmentbatch!(CPUThreads(), outs, imgs, FlipX() |> FlipY())
    source
    diff --git a/operations/aggmapfun/index.html b/operations/aggmapfun/index.html deleted file mode 100644 index d3b9717a..00000000 --- a/operations/aggmapfun/index.html +++ /dev/null @@ -1,6 +0,0 @@ - -AggregateThenMapFun: Aggregate and Map over Image · Augmentor.jl

    AggregateThenMapFun: Aggregate and Map over Image

    AggregateThenMapFun: Aggregate and Map over Image

    AggregateThenMapFun <: Augmentor.Operation

    Description

    Compute some aggregated value of the current image using the given function aggfun, and map that value over the current image using the given function mapfun.

    This is particularly useful for achieving effects such as per-image normalization.

    Usage

    AggregateThenMapFun(aggfun, mapfun)

    Arguments

    • aggfun : A function that takes the whole current image as input and whose result will also be passed to mapfun. It should have a signature of img -> agg, where img will be the current image. What type and value agg should be is up to the user.

    • mapfun : The binary function that should be mapped over all individual array elements. It should have a signature of (px, agg) -> new_px where px is a single element of the current image, and agg is the output of aggfun.

    See also

    MapFun, ConvertEltype, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# subtract the average RGB value of the current image
    -augment(img, AggregateThenMapFun(img -> mean(img), (px, agg) -> px - agg))
    source
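    Another way to use the same mechanism (a sketch, not part of the original examples) is to rescale a plain numeric array by its own per-image maximum:

    using Augmentor

    A = rand(Float32, 28, 28)  # a hypothetical single-channel image
    # divide every element by the maximum value of that particular image
    augment(A, AggregateThenMapFun(maximum, (px, agg) -> px / agg))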
    diff --git a/operations/cacheimage/index.html b/operations/cacheimage/index.html deleted file mode 100644 index 060a85eb..00000000 --- a/operations/cacheimage/index.html +++ /dev/null @@ -1,15 +0,0 @@ - -CacheImage: Buffer current state · Augmentor.jl

    CacheImage: Buffer current state

    CacheImage: Buffer current state

    CacheImage <: Augmentor.ImageOperation

    Description

    Write the current state of the image into the working memory. Optionally, the user can specify a preallocated buffer to write the image into. Note that if a buffer is provided, then it has to be of the correct size and eltype.

    Even without a preallocated buffer it can be beneficial in some situations to cache the image. An example of such a scenario is chaining a number of affine transformations after an elastic distortion, because performing that lazily requires nested interpolation.

    Usage

    CacheImage()
    -
    -CacheImage(buffer)

    Arguments

    • buffer : Optional. A preallocated AbstractArray of the appropriate size and eltype.

    See also

    augment

    Examples

    using Augmentor
    -
    -# make pipeline that forces caching after elastic distortion
    -pl = ElasticDistortion(3,3) |> CacheImage() |> Rotate(-10:10) |> ShearX(-5:5)
    -
    -# cache output of elastic distortion into the allocated
    -# 20x20 Matrix{Float64}. Note that this assumes that
    -# the input image is also a 20x20 Matrix{Float64}
    -pl = ElasticDistortion(3,3) |> CacheImage(zeros(20,20)) |> Rotate(-10:10)
    -
    -# convenience syntax with the same effect as above.
    -pl = ElasticDistortion(3,3) |> zeros(20,20) |> Rotate(-10:10)
    source
    diff --git a/operations/combinechannels/index.html b/operations/combinechannels/index.html deleted file mode 100644 index 06cd118b..00000000 --- a/operations/combinechannels/index.html +++ /dev/null @@ -1,18 +0,0 @@ - -ComineChannels: Combine color channels · Augmentor.jl

    CombineChannels: Combine color channels

    CombineChannels: Combine color channels

    CombineChannels <: Augmentor.Operation

    Description

    Combines the first dimension of a given array into a colorant of type colortype using the function ImageCore.colorview. The main difference to colorview is that a separate color channel is also expected for Gray images.

    The shape of the input image has to be appropriate for the given colortype, which also means that the separated color channel has to be the first dimension of the array. See PermuteDims if that is not the case.

    Usage

    CombineChannels(colortype)

    Arguments

    • colortype : The color type of the resulting image. Must be a subtype of ColorTypes.Colorant and match the color channel of the given image.

    See also

    SplitChannels, PermuteDims, augment

    Examples

    julia> using Augmentor, Colors
    -
    -julia> A = rand(3, 10, 10) # three color channels
    -3×10×10 Array{Float64,3}:
    -[...]
    -
    -julia> augment(A, CombineChannels(RGB))
    -10×10 Array{RGB{Float64},2}:
    -[...]
    -
    -julia> B = rand(1, 10, 10) # singleton color channel
    -1×10×10 Array{Float64,3}:
    -[...]
    -
    -julia> augment(B, CombineChannels(Gray))
    -10×10 Array{Gray{Float64},2}:
    -[...]
    source
    diff --git a/operations/converteltype/index.html b/operations/converteltype/index.html deleted file mode 100644 index 2e31c6b0..00000000 --- a/operations/converteltype/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -ConvertEltype: Color conversion · Augmentor.jl

    ConvertEltype: Color conversion

    ConvertEltype: Color conversion

    ConvertEltype <: Augmentor.Operation

    Description

    Convert the element type of the given array/image into the given eltype. This operation is especially useful for converting color images to grayscale (or the other way around). That said, the operation is not specific to color types and can also be used for numeric arrays (e.g. with separated channels).

    Note that this is an element-wise convert function. Thus it can not be used to combine or separate color channels. Use SplitChannels or CombineChannels for those purposes.

    Usage

    ConvertEltype(eltype)

    Arguments

    • eltype : The eltype of the resulting array/image.

    See also

    CombineChannels, SplitChannels, augment

    Examples

    julia> using Augmentor, Colors
    -
    -julia> A = rand(RGB, 10, 10) # three color channels
    -10×10 Array{RGB{Float64},2}:
    -[...]
    -
    -julia> augment(A, ConvertEltype(Gray{Float32})) # convert to grayscale
    -10×10 Array{Gray{Float32},2}:
    -[...]
    source
    Input → ConvertEltype(GrayA{N0f8}) (image comparison)
    diff --git a/operations/crop/index.html b/operations/crop/index.html deleted file mode 100644 index 0370107c..00000000 --- a/operations/crop/index.html +++ /dev/null @@ -1,12 +0,0 @@ - -Crop: Subset image · Augmentor.jl

    Crop: Subset image

    Crop: Subset image

    Augmentor.CropType.
    Crop <: Augmentor.ImageOperation

    Description

    Crops out the area denoted by the specified pixel ranges.

    For example the operation Crop(5:100, 2:10) would denote a crop for the rectangle that starts at x=2 and y=5 in the top left corner and ends at x=10 and y=100 in the bottom right corner. As we can see, the y-axis is specified first, because that is how the image is stored in an array. Thus the order of the provided index ranges needs to reflect the order of the array dimensions.

    Usage

    Crop(indices)
    -
    -Crop(indices...)

    Arguments

    • indices : NTuple or Vararg of UnitRange that denote the cropping range for each array dimension. This is very similar to how the indices for view are specified.

    See also

    CropNative, CropSize, CropRatio, augment

    Examples

    julia> using Augmentor
    -
    -julia> img = testpattern()
    -300×400 Array{RGBA{N0f8},2}:
    -[...]
    -
    -julia> augment(img, Crop(1:30, 361:400)) # crop upper right corner
    -30×40 Array{RGBA{N0f8},2}:
    -[...]
    source
    Input → Crop(70:140, 25:155) (image comparison)
    diff --git a/operations/cropnative/index.html b/operations/cropnative/index.html deleted file mode 100644 index 60af7eb2..00000000 --- a/operations/cropnative/index.html +++ /dev/null @@ -1,11 +0,0 @@ - -CropNative: Subset image · Augmentor.jl

    CropNative: Subset image

    CropNative: Subset image

    CropNative <: Augmentor.ImageOperation

    Description

    Crops out the area denoted by the specified pixel ranges.

    For example the operation CropNative(5:100, 2:10) would denote a crop for the rectangle that starts at x=2 and y=5 in the top left corner of native space and ends at x=10 and y=100 in the bottom right corner of native space.

    In contrast to Crop, the position x=1 y=1 is not necessarily located at the top left of the current image, but instead depends on the cumulative effect of the previous transformations. The reason for this is that affine transformations are usually performed around the center of the image, which is reflected in "native space". This is useful for combining transformations such as Rotate or ShearX with a crop around the center area.

    Usage

    CropNative(indices)
    -
    -CropNative(indices...)

    Arguments

    • indices : NTuple or Vararg of UnitRange that denote the cropping range for each array dimension. This is very similar to how the indices for view are specified.

    See also

    Crop, CropSize, CropRatio, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# cropped at top left corner
    -augment(img, Rotate(45) |> Crop(1:300, 1:400))
    -
    -# cropped around center of rotated image
    -augment(img, Rotate(45) |> CropNative(1:300, 1:400))
    source
    (Rotate(45), Crop(1:210,1:280)) vs. (Rotate(45), CropNative(1:210,1:280)) (image comparison)
    diff --git a/operations/cropratio/index.html b/operations/cropratio/index.html deleted file mode 100644 index d7ff6d32..00000000 --- a/operations/cropratio/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -CropRatio: Crop centered window · Augmentor.jl

    CropRatio: Crop centered window

    CropRatio: Crop centered window

    CropRatio <: Augmentor.ImageOperation

    Description

    Crops out the biggest area around the center of the given image such that the output image satisfies the specified aspect ratio (i.e. width divided by height).

    For example the operation CropRatio(1) would denote a crop for the biggest square around the center of the image.

    For randomly placed crops take a look at RCropRatio.

    Usage

    CropRatio(ratio)
    -
    -CropRatio(; ratio = 1)

    Arguments

    • ratio::Number : Optional. A number denoting the aspect ratio. For example specifying ratio=16/9 would denote a 16:9 aspect ratio. Defaults to 1, which describes a square crop.

    See also

    RCropRatio, CropSize, Crop, CropNative, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# crop biggest square around the image center
    -augment(img, CropRatio(1))
    source
    Input → Output for CropRatio(1) (image comparison)
    diff --git a/operations/cropsize/index.html b/operations/cropsize/index.html deleted file mode 100644 index e149244e..00000000 --- a/operations/cropsize/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -CropSize: Crop centered window · Augmentor.jl

    CropSize: Crop centered window

    CropSize: Crop centered window

    CropSize <: Augmentor.ImageOperation

    Description

    Crops out the area of the specified pixel size around the center of the input image.

    For example the operation CropSize(10, 50) would denote a crop for a rectangle of height 10 and width 50 around the center of the input image.

    Usage

    CropSize(size)
    -
    -CropSize(size...)

    Arguments

    • size : NTuple or Vararg of Int that denote the output size in pixel for each dimension.

    See also

    CropRatio, Crop, CropNative, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# cropped around center of rotated image
    -augment(img, Rotate(45) |> CropSize(300, 400))
    source
    Input → Output for CropSize(45, 225) (image comparison)
    diff --git a/operations/either/index.html b/operations/either/index.html deleted file mode 100644 index ed739032..00000000 --- a/operations/either/index.html +++ /dev/null @@ -1,21 +0,0 @@ - -Either: Stochastic branches · Augmentor.jl

Either: Stochastic branches

    Either <: Augmentor.ImageOperation

    Description

Chooses between the given operations at random when applied. This is particularly useful if one wants to, for example, first rotate the image either 90 degrees clockwise or anticlockwise (but never both), and then apply some other operation(s) afterwards.

When compiling a pipeline, Either will analyze the provided operations in order to identify the preferred formalism to use when applied. The formalism is chosen such that it is supported by all given operations. This way the output of applying Either will be inferable and the whole pipeline will remain type-stable (even though randomness is involved).

By default, each specified image operation has the same probability of occurrence. This default behaviour can be overridden by specifying the chances manually.

    Usage

    Either(operations, [chances])
    -
    -Either(operations...; [chances])
    -
    -Either(pairs...)
    -
    -*(operations...)
    -
    -*(pairs...)

    Arguments

    • operations : NTuple or Vararg of Augmentor.ImageOperation that denote the possible choices to sample from when applied.

• chances : Optional. Denotes the relative chances for an operation to be sampled. Has to contain the same number of elements as operations. Either an NTuple of numbers if specified as a positional argument, or alternatively an AbstractVector of numbers if specified as a keyword argument. If omitted, every operation will have equal probability of occurring.

    • pairs : Vararg of Pair{<:Real,<:Augmentor.ImageOperation}. A compact way to specify an operation and its chance of occurring together.

    See also

    NoOp, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
-# all three operations have equal chance of occurring
    -augment(img, Either(FlipX(), FlipY(), NoOp()))
    -augment(img, FlipX() * FlipY() * NoOp())
    -
    -# NoOp is twice as likely as either FlipX or FlipY
    -augment(img, Either(1=>FlipX(), 1=>FlipY(), 2=>NoOp()))
    -augment(img, Either(FlipX(), FlipY(), NoOp(), chances=[1,1,2]))
    -augment(img, Either((FlipX(), FlipY(), NoOp()), (1,1,2)))
    -augment(img, (1=>FlipX()) * (1=>FlipY()) * (2=>NoOp()))
    source
    diff --git a/operations/elasticdistortion/index.html b/operations/elasticdistortion/index.html deleted file mode 100644 index c2df7437..00000000 --- a/operations/elasticdistortion/index.html +++ /dev/null @@ -1,13 +0,0 @@ - -ElasticDistortion: Smoothed random distortions · Augmentor.jl

ElasticDistortion: Smoothed random distortions

    ElasticDistortion <: Augmentor.ImageOperation

    Description

Distorts the given image using a randomly (uniformly) generated vector field of the given grid size. This field will be stretched over the given image when applied, which in turn will morph the original image into a new image using a linear interpolation of both the image and the vector field.

In contrast to [RandomDistortion], the resulting vector field is also smoothed using a Gaussian filter with parameter sigma. This will result in a less chaotic vector field and thus resemble a more natural distortion.

    Usage

    ElasticDistortion(gridheight, gridwidth, scale, sigma, [iter=1], [border=false], [norm=true])
    -
    -ElasticDistortion(gridheight, gridwidth, scale; [sigma=2], [iter=1], [border=false], [norm=true])
    -
    -ElasticDistortion(gridheight, [gridwidth]; [scale=0.2], [sigma=2], [iter=1], [border=false], [norm=true])

    Arguments

    • gridheight : The grid height of the displacement vector field. This effectively specifies the number of vertices along the Y dimension used as landmarks, where all the positions between the grid points are interpolated.

• gridwidth : The grid width of the displacement vector field. This effectively specifies the number of vertices along the X dimension used as landmarks, where all the positions between the grid points are interpolated.

• scale : Optional. The scaling factor applied to all displacement vectors in the field. This effectively defines the "strength" of the deformation. There is no theoretical upper limit to this factor, but a value somewhere between 0.01 and 1.0 seems to be the most reasonable choice. Defaults to 0.2.

    • sigma : Optional. Sigma parameter of the Gaussian filter. This parameter effectively controls the strength of the smoothing. Defaults to 2.

    • iter : Optional. The number of times the smoothing operation is applied to the displacement vector field. This is especially useful if border = false because the border will be reset to zero after each pass. Thus the displacement is a little less aggressive towards the borders of the image than it is towards its center. Defaults to 1.

    • border : Optional. Specifies if the borders should be distorted as well. If false, the borders of the image will be preserved. This effectively pins the outermost vertices on their original position and the operation thus only distorts the inner content of the image. Defaults to false.

    • norm : Optional. If true, the displacement vectors of the field will be normalized by the norm of the field. This will have the effect that the scale factor should be more or less independent of the grid size. Defaults to true.

    See also

    augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# distort with pinned borders
    -augment(img, ElasticDistortion(15, 15; scale = 0.1))
    -
    -# distort everything more smoothly.
    -augment(img, ElasticDistortion(10, 10; sigma = 4, iter=3, border=true))
    source
[Image: input and samples for ElasticDistortion(15, 15, 0.1)]
[Image: input and samples for ElasticDistortion(10, 10, 0.2, 4, 3, true)]
    diff --git a/operations/flipx/index.html b/operations/flipx/index.html deleted file mode 100644 index 0e8b3cdf..00000000 --- a/operations/flipx/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -FlipX: Mirror horizontally · Augmentor.jl

    FlipX: Mirror horizontally

    FlipX <: Augmentor.AffineOperation

    Description

    Reverses the x-order of each pixel row. Another way of describing it would be that it mirrors the image on the y-axis, or that it mirrors the image horizontally.

    If created using the parameter p, the operation will be lifted into Either(p=>FlipX(), 1-p=>NoOp()), where p denotes the probability of applying FlipX and 1-p the probability for applying NoOp. See the documentation of Either for more information.
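For instance, a minimal sketch of the probabilistic form (assuming the bundled testpattern image; the probability value is illustrative):

using Augmentor
img = testpattern()

# mirror the image horizontally with 70% probability,
# otherwise pass it through unchanged
img_new = augment(img, FlipX(0.7))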

    Usage

    FlipX()
    -
    -FlipX(p)

    Arguments

    • p::Number : Optional. Probability of applying the operation. Must be in the interval [0,1].

    See also

    FlipY, Either, augment

    Examples

    julia> using Augmentor
    -
    -julia> img = [200 150; 50 1]
    -2×2 Array{Int64,2}:
    - 200  150
    -  50    1
    -
    -julia> img_new = augment(img, FlipX())
    -2×2 Array{Int64,2}:
    - 150  200
    -   1   50
    source
[Image: input and output for FlipX()]
    diff --git a/operations/flipy/index.html b/operations/flipy/index.html deleted file mode 100644 index 3fa9a3fe..00000000 --- a/operations/flipy/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -FlipY: Mirror vertically · Augmentor.jl

    FlipY: Mirror vertically

    FlipY <: Augmentor.AffineOperation

    Description

    Reverses the y-order of each pixel column. Another way of describing it would be that it mirrors the image on the x-axis, or that it mirrors the image vertically.

    If created using the parameter p, the operation will be lifted into Either(p=>FlipY(), 1-p=>NoOp()), where p denotes the probability of applying FlipY and 1-p the probability for applying NoOp. See the documentation of Either for more information.

    Usage

    FlipY()
    -
    -FlipY(p)

    Arguments

    • p::Number : Optional. Probability of applying the operation. Must be in the interval [0,1].

    See also

    FlipX, Either, augment

    Examples

    julia> using Augmentor
    -
    -julia> img = [200 150; 50 1]
    -2×2 Array{Int64,2}:
    - 200  150
    -  50    1
    -
    -julia> img_new = augment(img, FlipY())
    -2×2 Array{Int64,2}:
    -  50    1
    - 200  150
    source
[Image: input and output for FlipY()]
    diff --git a/operations/index.html b/operations/index.html deleted file mode 100644 index 926f2008..00000000 --- a/operations/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Supported Operations · Augmentor.jl

Supported Operations

Augmentor provides a wide variety of built-in image operations. This page provides an overview of all exported operations, organized by their main category. These categories are chosen because they serve some practical purpose. For example, Affine Operations allow for a special optimization under the hood when chained together.

    Tip

    Click on an image operation for more details.

    Affine Transformations

A sizeable number of the provided operations fall under the category of affine transformations. As such, they can be described using what is known as an affine map, which is inherently composable when chained together. However, utilizing such an affine formulation requires (costly) interpolation, which may not always be needed to achieve the desired effect. For that reason, some of the operations below also provide a special purpose implementation to produce their specified result. Those are usually preferred over the affine formulation if sensible when considering the complete pipeline. A short sketch of such a chained pipeline follows the gallery below.

[Image gallery: Input, FlipX, FlipY, Rotate90, Rotate270, Rotate180]
[Image gallery: Input, Rotate, ShearX, ShearY, Scale, Zoom]
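As a minimal sketch of that composability (assuming the bundled testpattern image; the exact angles are illustrative), two affine operations can be chained so that Augmentor is free to fuse them into a single warp:

using Augmentor
img = testpattern()

# Rotate and ShearX are both affine operations, so Augmentor can
# compose them into one affine map instead of warping the image twice
pl = Rotate(-10:10) |> ShearX(-5:5)
img_new = augment(img, pl)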

    Distortions

    Aside from affine transformations, Augmentor also provides functionality for performing a variety of distortions. These types of operations usually provide a much larger distribution of possible output images.

[Image gallery: Input, ElasticDistortion]

    Resizing and Subsetting

The input images of a given dataset can be of various shapes and sizes. Yet, it is often required by the learning algorithm that the data be of uniform structure. To that end, Augmentor provides a number of ways to alter or subset the given images.

[Image gallery: Input, Resize]

The process of cropping is useful to discard parts of the input image. To provide this functionality lazily, applying a crop introduces a layer of representation called a "view" or SubArray. This is different from, yet compatible with, how affine operations or other special purpose implementations work. This means that chaining a crop with some affine operation is perfectly fine if done sequentially. However, it is generally not advised to combine affine operations with crop operations within an Either block, because doing so would force the Either to trigger the eager computation of its branches in order to preserve type-stability. A short sketch of the recommended sequential pattern follows the gallery below.

[Image gallery: Input, Crop, CropNative, CropSize, CropRatio, RCropRatio]
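For instance, a minimal sketch of sequential chaining (again assuming testpattern; the concrete angle and sizes are illustrative):

using Augmentor
img = testpattern()

# recommended: let the lazy crop follow the affine operation sequentially
augment(img, Rotate(30) |> CropSize(200, 200))

# discouraged: placing an affine operation and a crop inside the same
# Either block would force eager evaluation of its branches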

    Element-wise Transformations and Layout

It is not uncommon that machine learning frameworks require the data in a specific form and layout. For example, many deep learning frameworks expect the color channels of the images to be encoded in the third dimension of a 4-dimensional array. Augmentor allows converting from (and to) these different layouts using special operations that are mainly useful at the beginning or end of an augmentation pipeline (a short sketch follows the table below).

Category           | Available Operations
Conversion         | ConvertEltype (e.g. convert to grayscale)
Mapping            | MapFun, AggregateThenMapFun
Information Layout | SplitChannels, CombineChannels, PermuteDims, Reshape
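As a hedged sketch (assuming the RGBA testpattern image and a framework that expects width × height × channel arrays; the crop size is illustrative), the layout operations are typically placed at the end of the pipeline:

using Augmentor
img = testpattern()

# split the colors into their own leading dimension, then reorder
# the axes to width x height x channel for the downstream framework
pl = CropSize(64, 64) |> SplitChannels() |> PermuteDims(3, 2, 1)
arr = augment(img, pl)  # 64x64x4 array of N0f8 (RGBA has four channels)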

    Utility Operations

Aside from "true" operations that specify some kind of transformation, there are also a couple of special utility operations used for functionality such as stochastic branching (a small sketch follows the table below).

Category           | Available Operations
Utility Operations | NoOp, CacheImage, Either
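For instance, a hedged sketch of how these utilities might be combined (the exact pipeline is illustrative; it assumes that CacheImage eagerly materializes the intermediate result, as its name suggests):

using Augmentor
img = testpattern()

# randomly apply one of the flips (or nothing), cache the intermediate
# image, then take a randomly placed square crop
pl = Either(FlipX(), FlipY(), NoOp()) |> CacheImage() |> RCropRatio(1)
img_new = augment(img, pl)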
    diff --git a/operations/mapfun/index.html b/operations/mapfun/index.html deleted file mode 100644 index 68278f1a..00000000 --- a/operations/mapfun/index.html +++ /dev/null @@ -1,10 +0,0 @@ - -MapFun: Map function over Image · Augmentor.jl

MapFun: Map function over Image

    MapFun <: Augmentor.Operation

    Description

    Maps the given function over all individual array elements.

This means that the given function is called with an individual element and is expected to return a transformed element that should take the original's place. This further implies that the function is expected to be unary. It is encouraged that the function be consistent in its return type and be type-stable.

    Usage

    MapFun(fun)

    Arguments

    • fun : The unary function that should be mapped over all individual array elements.

    See also

    AggregateThenMapFun, ConvertEltype, augment

    Examples

    using Augmentor, ColorTypes
    -img = testpattern()
    -
    -# subtract the constant RGBA value from each pixel
    -augment(img, MapFun(px -> px - RGBA(0.5, 0.3, 0.7, 0.0)))
    -
    -# separate channels to scale each numeric element by a constant value
    -pl = SplitChannels() |> MapFun(el -> el * 0.5) |> CombineChannels(RGBA)
    -augment(img, pl)
    source
    diff --git a/operations/noop/index.html b/operations/noop/index.html deleted file mode 100644 index 7ab12821..00000000 --- a/operations/noop/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -NoOp: Identity function · Augmentor.jl diff --git a/operations/permutedims/index.html b/operations/permutedims/index.html deleted file mode 100644 index 31d413cf..00000000 --- a/operations/permutedims/index.html +++ /dev/null @@ -1,20 +0,0 @@ - -PermuteDims: Change dimension order · Augmentor.jl

PermuteDims: Change dimension order

    PermuteDims <: Augmentor.Operation

    Description

Permute the dimensions of the given array with the predefined permutation perm. This operation is particularly useful if the order of the dimensions needs to be different from the default "julian" layout (described below).

Augmentor expects the given images to be in vertical-major layout for which the colors are encoded in the element type itself. Many deep learning frameworks, however, require their input in a different order. For example, it is not unusual that separate color channels are expected to be encoded in the third dimension.

    Usage

    PermuteDims(perm)
    -
    -PermuteDims(perm...)

    Arguments

    • perm : The concrete dimension permutation that should be used. Has to be specified as a Vararg{Int} or as a NTuple of Int. The length of perm has to match the number of dimensions of the expected input image to that operation.

    See also

    SplitChannels, CombineChannels, augment

    Examples

    julia> using Augmentor, Colors
    -
    -julia> A = rand(10, 5, 3) # width=10, height=5, and 3 color channels
    -10×5×3 Array{Float64,3}:
    -[...]
    -
    -julia> img = augment(A, PermuteDims(3,2,1) |> CombineChannels(RGB))
    -5×10 Array{RGB{Float64},2}:
    -[...]
    -
    -julia> img2 = testpattern()
    -300×400 Array{RGBA{N0f8},2}:
    -[...]
    -
    -julia> B = augment(img2, SplitChannels() |> PermuteDims(3,2,1))
    -400×300×4 Array{N0f8,3}:
    -[...]
    source
    diff --git a/operations/rcropratio/index.html b/operations/rcropratio/index.html deleted file mode 100644 index 06a4cc3a..00000000 --- a/operations/rcropratio/index.html +++ /dev/null @@ -1,8 +0,0 @@ - -RCropRatio: Crop random window · Augmentor.jl

RCropRatio: Crop random window

    RCropRatio <: Augmentor.ImageOperation

    Description

    Crops out the biggest possible area at some random position of the given image, such that the output image satisfies the specified aspect ratio (i.e. width divided by height).

    For example the operation RCropRatio(1) would denote a crop for the biggest possible square. If there is more than one such square, then one will be selected at random.

    Usage

    RCropRatio(ratio)
    -
    -RCropRatio(; ratio = 1)

    Arguments

    • ratio::Number : Optional. A number denoting the aspect ratio. For example specifying ratio=16/9 would denote a 16:9 aspect ratio. Defaults to 1, which describes a square crop.

    See also

    CropRatio, CropSize, Crop, CropNative, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
-# crop a randomly placed square of maximum size
    -augment(img, RCropRatio(1))
    source
[Image: input and samples for RCropRatio(1)]
    diff --git a/operations/reshape/index.html b/operations/reshape/index.html deleted file mode 100644 index a5d0c6c7..00000000 --- a/operations/reshape/index.html +++ /dev/null @@ -1,12 +0,0 @@ - -Reshape: Reinterpret shape · Augmentor.jl

Reshape: Reinterpret shape

    Reshape <: Augmentor.Operation

    Description

Reinterpret the shape of the given array of numbers or colorants. This is useful, for example, to create singleton dimensions that deep learning frameworks may need for colorless images, or for converting an image array to a feature vector (and vice versa).
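A minimal sketch of the feature-vector use case (assuming a plain 10×10 numeric array; the round trip relies on Reshape accepting the flattened vector as input, as the description suggests):

using Augmentor

A = rand(10, 10)

# flatten the 10x10 array into a 100-element feature vector
v = augment(A, Reshape(100))

# and reinterpret it back into a 10x10 array
B = augment(v, Reshape(10, 10))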

    Usage

    Reshape(dims)
    -
    -Reshape(dims...)

    Arguments

    • dims : The new sizes for each dimension of the output image. Has to be specified as a Vararg{Int} or as a NTuple of Int.

    See also

    CombineChannels, augment

    Examples

    julia> using Augmentor, Colors
    -
    -julia> A = rand(10,10)
    -10×10 Array{Float64,2}:
    -[...]
    -
    -julia> augment(A, Reshape(10,10,1)) # add trailing singleton dimension
    -10×10×1 Array{Float64,3}:
    -[...]
    source
    diff --git a/operations/resize/index.html b/operations/resize/index.html deleted file mode 100644 index 9842d04a..00000000 --- a/operations/resize/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -Resize: Set static image size · Augmentor.jl

Resize: Set static image size

    Resize <: Augmentor.ImageOperation

    Description

    Rescales the image to a fixed pre-specified pixel size.

This operation does not take any measures to preserve the aspect ratio of the source image. Instead, the original image will simply be resized to the given dimensions. This is useful when one needs a set of images to all be of the exact same size.

    Usage

    Resize(; height=64, width=64)
    -
    -Resize(size)
    -
    -Resize(size...)

    Arguments

    • size : NTuple or Vararg of Int that denote the output size in pixel for each dimension.

    See also

    CropSize, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -augment(img, Resize(30, 40))
    source
[Image: input and output for Resize(100, 150)]
    diff --git a/operations/rotate/index.html b/operations/rotate/index.html deleted file mode 100644 index 4952fb10..00000000 --- a/operations/rotate/index.html +++ /dev/null @@ -1,12 +0,0 @@ - -Rotate: Arbitrary rotations · Augmentor.jl

Rotate: Arbitrary rotations

    Rotate <: Augmentor.AffineOperation

    Description

Rotate the image upwards by the given degree. This operation can only be performed as an affine transformation and will in general cause other operations of the pipeline to use their affine formulation as well (if they have one).

In contrast to the special case rotations (e.g. Rotate90), the type Rotate can describe any arbitrary number of degrees. It will always perform the rotation around the center of the image. This can be particularly useful when combining the operation with CropNative.

    Usage

    Rotate(degree)

    Arguments

    • degree : Real or AbstractVector of Real that denote the rotation angle(s) in degree. If a vector is provided, then a random element will be sampled each time the operation is applied.

    See also

    Rotate90, Rotate180, Rotate270, CropNative, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# rotate exactly 45 degree
    -augment(img, Rotate(45))
    -
    -# rotate between 10 and 20 degree upwards
    -augment(img, Rotate(10:20))
    -
    -# rotate one of the five specified degrees
    -augment(img, Rotate([-10, -5, 0, 5, 10]))
    source

    In contrast to the special case rotations outlined above, the type Rotate can describe any arbitrary number of degrees. It will always perform the rotation around the center of the image. This can be particularly useful when combining the operation with CropNative.

[Image: input and output for Rotate(15)]

    It is also possible to pass some abstract vector to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.

[Image: input and samples for Rotate(-10:10)]
    diff --git a/operations/rotate180/index.html b/operations/rotate180/index.html deleted file mode 100644 index 04764c8c..00000000 --- a/operations/rotate180/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Rotate180: Rotate by 180 degree · Augmentor.jl

Rotate180: Rotate by 180 degree

    Rotate180 <: Augmentor.AffineOperation

    Description

    Rotates the image 180 degrees. This is a special case rotation because it can be performed very efficiently by simply rearranging the existing pixels. Furthermore, the output image will have the same dimensions as the input image.

    If created using the parameter p, the operation will be lifted into Either(p=>Rotate180(), 1-p=>NoOp()), where p denotes the probability of applying Rotate180 and 1-p the probability for applying NoOp. See the documentation of Either for more information.

    Usage

    Rotate180()
    -
    -Rotate180(p)

    Arguments

    • p::Number : Optional. Probability of applying the operation. Must be in the interval [0,1].

    See also

    Rotate90, Rotate270, Rotate, Either, augment

    Examples

    julia> using Augmentor
    -
    -julia> img = [200 150; 50 1]
    -2×2 Array{Int64,2}:
    - 200  150
    -  50    1
    -
    -julia> img_new = augment(img, Rotate180())
    -2×2 Array{Int64,2}:
    -   1   50
    - 150  200
    source
[Image: input and output for Rotate180()]
    diff --git a/operations/rotate270/index.html b/operations/rotate270/index.html deleted file mode 100644 index 09dc53f3..00000000 --- a/operations/rotate270/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Rotate270: Rotate downwards 90 degree · Augmentor.jl

Rotate270: Rotate downwards 90 degree

    Rotate270 <: Augmentor.AffineOperation

    Description

    Rotates the image upwards 270 degrees, which can also be described as rotating the image downwards 90 degrees. This is a special case rotation, because it can be performed very efficiently by simply rearranging the existing pixels. However, it is generally not the case that the output image will have the same size as the input image, which is something to be aware of.

    If created using the parameter p, the operation will be lifted into Either(p=>Rotate270(), 1-p=>NoOp()), where p denotes the probability of applying Rotate270 and 1-p the probability for applying NoOp. See the documentation of Either for more information.

    Usage

    Rotate270()
    -
    -Rotate270(p)

    Arguments

    • p::Number : Optional. Probability of applying the operation. Must be in the interval [0,1].

    See also

    Rotate90, Rotate180, Rotate, Either, augment

    Examples

    julia> using Augmentor
    -
    -julia> img = [200 150; 50 1]
    -2×2 Array{Int64,2}:
    - 200  150
    -  50    1
    -
    -julia> img_new = augment(img, Rotate270())
    -2×2 Array{Int64,2}:
    - 50  200
    -  1  150
    source
[Image: input and output for Rotate270()]
    diff --git a/operations/rotate90/index.html b/operations/rotate90/index.html deleted file mode 100644 index 191ecb13..00000000 --- a/operations/rotate90/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Rotate90: Rotate upwards 90 degree · Augmentor.jl

Rotate90: Rotate upwards 90 degree

    Rotate90 <: Augmentor.AffineOperation

    Description

    Rotates the image upwards 90 degrees. This is a special case rotation because it can be performed very efficiently by simply rearranging the existing pixels. However, it is generally not the case that the output image will have the same size as the input image, which is something to be aware of.

    If created using the parameter p, the operation will be lifted into Either(p=>Rotate90(), 1-p=>NoOp()), where p denotes the probability of applying Rotate90 and 1-p the probability for applying NoOp. See the documentation of Either for more information.

    Usage

    Rotate90()
    -
    -Rotate90(p)

    Arguments

    • p::Number : Optional. Probability of applying the operation. Must be in the interval [0,1].

    See also

    Rotate180, Rotate270, Rotate, Either, augment

    Examples

    julia> using Augmentor
    -
    -julia> img = [200 150; 50 1]
    -2×2 Array{Int64,2}:
    - 200  150
    -  50    1
    -
    -julia> img_new = augment(img, Rotate90())
    -2×2 Array{Int64,2}:
    - 150   1
    - 200  50
    source
[Image: input and output for Rotate90()]
    diff --git a/operations/scale/index.html b/operations/scale/index.html deleted file mode 100644 index 12b7d0fb..00000000 --- a/operations/scale/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Scale: Relative resizing · Augmentor.jl

    Scale: Relative resizing

    Scale <: Augmentor.AffineOperation

    Description

    Multiplies the image height and image width by the specified factors. This means that the size of the output image depends on the size of the input image.

    The provided factors can either be numbers or vectors of numbers.

    • If numbers are provided, then the operation is deterministic and will always scale the input image with the same factors.

• If vectors are provided, then each time the operation is applied a valid index is sampled and the elements corresponding to that index are used as scaling factors.

    The scaling is performed relative to the image center, which can be useful when following the operation with CropNative.
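As a hedged sketch of that combination (the 300×400 testpattern is assumed, so the native crop range simply re-selects the original extent):

using Augmentor
img = testpattern()  # 300x400 image

# enlarge around the center, then crop the original extent in
# native space so the output focuses on the scaled center region
augment(img, Scale(1.5) |> CropNative(1:300, 1:400))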

    Usage

    Scale(factors)
    -
    -Scale(factors...)

    Arguments

    • factors : NTuple or Vararg of Real or AbstractVector that denote the scale factor(s) for each array dimension. If only one variable is specified it is assumed that height and width should be scaled by the same factor(s).

    See also

    Zoom, Resize, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# half the image size
    -augment(img, Scale(0.5))
    -
    -# uniformly scale by a random factor from 1.2, 1.3, or 1.4
    -augment(img, Scale([1.2, 1.3, 1.4]))
    -
    -# scale by either 0.5x0.7 or by 0.6x0.8
    -augment(img, Scale([0.5, 0.6], [0.7, 0.8]))
    source
[Image: input and output for Scale(0.9, 0.5)]

    In the case that only a single scale factor is specified, the operation will assume that the intention is to scale all dimensions uniformly by that factor.

[Image: input and output for Scale(1.2)]

    It is also possible to pass some abstract vector(s) to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.

[Image: input and samples for Scale(0.9:0.05:1.2)]
    diff --git a/operations/shearx/index.html b/operations/shearx/index.html deleted file mode 100644 index 2d8d9fec..00000000 --- a/operations/shearx/index.html +++ /dev/null @@ -1,12 +0,0 @@ - -ShearX: Shear horizontally · Augmentor.jl

ShearX: Shear horizontally

    ShearX <: Augmentor.AffineOperation

    Description

Shear the image horizontally by the given degree. This operation can only be performed as an affine transformation and will in general cause other operations of the pipeline to use their affine formulation as well (if they have one).

    It will always perform the transformation around the center of the image. This can be particularly useful when combining the operation with CropNative.

    Usage

    ShearX(degree)

    Arguments

    • degree : Real or AbstractVector of Real that denote the shearing angle(s) in degree. If a vector is provided, then a random element will be sampled each time the operation is applied.

    See also

    ShearY, CropNative, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# shear horizontally exactly 5 degree
    -augment(img, ShearX(5))
    -
    -# shear horizontally between 10 and 20 degree to the right
    -augment(img, ShearX(10:20))
    -
    -# shear horizontally one of the five specified degrees
    -augment(img, ShearX([-10, -5, 0, 5, 10]))
    source

    It will always perform the transformation around the center of the image. This can be particularly useful when combining the operation with CropNative.

[Image: input and output for ShearX(10)]

    It is also possible to pass some abstract vector to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.

[Image: input and samples for ShearX(-10:10)]
    diff --git a/operations/sheary/index.html b/operations/sheary/index.html deleted file mode 100644 index b0134c42..00000000 --- a/operations/sheary/index.html +++ /dev/null @@ -1,12 +0,0 @@ - -ShearY: Shear vertically · Augmentor.jl

ShearY: Shear vertically

    ShearY <: Augmentor.AffineOperation

    Description

Shear the image vertically by the given degree. This operation can only be performed as an affine transformation and will in general cause other operations of the pipeline to use their affine formulation as well (if they have one).

    It will always perform the transformation around the center of the image. This can be particularly useful when combining the operation with CropNative.

    Usage

    ShearY(degree)

    Arguments

    • degree : Real or AbstractVector of Real that denote the shearing angle(s) in degree. If a vector is provided, then a random element will be sampled each time the operation is applied.

    See also

    ShearX, CropNative, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# shear vertically exactly 5 degree
    -augment(img, ShearY(5))
    -
    -# shear vertically between 10 and 20 degree upwards
    -augment(img, ShearY(10:20))
    -
    -# shear vertically one of the five specified degrees
    -augment(img, ShearY([-10, -5, 0, 5, 10]))
    source

    It will always perform the transformation around the center of the image. This can be particularly useful when combining the operation with CropNative.

[Image: input and output for ShearY(10)]

    It is also possible to pass some abstract vector to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.

[Image: input and samples for ShearY(-10:10)]
    diff --git a/operations/splitchannels/index.html b/operations/splitchannels/index.html deleted file mode 100644 index db965f88..00000000 --- a/operations/splitchannels/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -SplitChannels: Separate color channels · Augmentor.jl

SplitChannels: Separate color channels

    SplitChannels <: Augmentor.Operation

    Description

Splits out the color channels of the given image using the function ImageCore.channelview. This will effectively create a new leading array dimension for the colors. In contrast to ImageCore.channelview, it will also result in a new dimension for gray images.

    This operation is mainly useful at the end of a pipeline in combination with PermuteDims in order to prepare the image for the training algorithm, which often requires the color channels to be separate.

    Usage

    SplitChannels()

    See also

    PermuteDims, CombineChannels, augment

    Examples

    julia> using Augmentor
    -
    -julia> img = testpattern()
    -300×400 Array{RGBA{N0f8},2}:
    -[...]
    -
    -julia> augment(img, SplitChannels())
    -4×300×400 Array{N0f8,3}:
    -[...]
    -
    -julia> augment(img, SplitChannels() |> PermuteDims(3,2,1))
    -400×300×4 Array{N0f8,3}:
    -[...]
    source
    diff --git a/operations/zoom/index.html b/operations/zoom/index.html deleted file mode 100644 index 9ae8a5c0..00000000 --- a/operations/zoom/index.html +++ /dev/null @@ -1,14 +0,0 @@ - -Zoom: Scale without resize · Augmentor.jl

    Zoom: Scale without resize

    Zoom <: Augmentor.ImageOperation

    Description

    Scales the image height and image width by the specified factors, but crops the image such that the original size is preserved.

    The provided factors can either be numbers or vectors of numbers.

    • If numbers are provided, then the operation is deterministic and will always scale the input image with the same factors.

• If vectors are provided, then each time the operation is applied a valid index is sampled and the elements corresponding to that index are used as scaling factors.

In contrast to Scale, the size of the output image is the same as the size of the input image, while the content is scaled the same way. The same effect could be achieved by following a Scale with a CropSize, with the caveat that one would need to know the exact size of the input image beforehand.
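A minimal sketch of that equivalence (assuming the 300×400 testpattern, whose size we therefore know beforehand; the factor is illustrative):

using Augmentor
img = testpattern()  # 300x400 image

# Zoom keeps the original size while scaling the content ...
out1 = augment(img, Zoom(1.2))

# ... which is roughly what Scale followed by a center crop of the
# known input size produces
out2 = augment(img, Scale(1.2) |> CropSize(300, 400))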

    Usage

    Zoom(factors)
    -
    -Zoom(factors...)

    Arguments

    • factors : NTuple or Vararg of Real or AbstractVector that denote the scale factor(s) for each array dimension. If only one variable is specified it is assumed that height and width should be scaled by the same factor(s).

    See also

    Scale, Resize, augment

    Examples

    using Augmentor
    -img = testpattern()
    -
    -# half the image size
    -augment(img, Zoom(0.5))
    -
    -# uniformly scale by a random factor from 1.2, 1.3, or 1.4
    -augment(img, Zoom([1.2, 1.3, 1.4]))
    -
    -# scale by either 0.5x0.7 or by 0.6x0.8
    -augment(img, Zoom([0.5, 0.6], [0.7, 0.8]))
    source
[Image: input and output for Zoom(1.2)]

    It is also possible to pass some abstract vector to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.

[Image: input and samples for Zoom(0.9:0.05:1.3)]
    diff --git a/search/index.html b/search/index.html deleted file mode 100644 index 71cbccf9..00000000 --- a/search/index.html +++ /dev/null @@ -1,2 +0,0 @@ - -Search · Augmentor.jl

Search

      diff --git a/search_index.js b/search_index.js deleted file mode 100644 index acdba0e8..00000000 --- a/search_index.js +++ /dev/null @@ -1,1147 +0,0 @@ -var documenterSearchIndex = {"docs": [ - -{ - "location": "#", - "page": "Home", - "title": "Home", - "category": "page", - "text": "(Image: header)A fast library for increasing the number of training images by applying various transformations." -}, - -{ - "location": "#Augmentor.jl\'s-documentation-1", - "page": "Home", - "title": "Augmentor.jl\'s documentation", - "category": "section", - "text": "Augmentor is a real-time image augmentation library designed to render the process of artificial dataset enlargement more convenient, less error prone, and easier to reproduce. It offers the user the ability to build a stochastic image-processing pipeline (or simply augmentation pipeline) using image operations as building blocks. In other words, an augmentation pipeline is little more but a sequence of operations for which the parameters can (but need not) be random variables, as the following code snippet demonstrates.using Augmentor\npl = ElasticDistortion(6, scale=0.3, border=true) |>\n Rotate([10, -5, -3, 0, 3, 5, 10]) |>\n ShearX(-10:10) * ShearY(-10:10) |>\n CropSize(28, 28) |>\n Zoom(0.9:0.1:1.2)Such a pipeline can then be used for sampling. Here we use the first few examples of the MNIST database.# I can\'t use Reel.jl, because the way it stores the tmp pngs\n# causes the images to be upscaled too much.\nusing Augmentor, MLDatasets, Images, Colors\nusing PaddedViews, OffsetArrays\n\npl = ElasticDistortion(6, scale=0.3, border=true) |>\n Rotate([10, -5, -3, 0, 3, 5, 10]) |>\n ShearX(-10:10) * ShearY(-10:10) |>\n CropSize(28, 28) |>\n Zoom(0.9:0.1:1.2)\n\nmd_imgs = String[]\nfor i in 1:24\n srand(i) # somehow srand in the beginning isn\'t enough\n input = MNIST.convert2image(MNIST.traintensor(i))\n imgs = [augment(input, pl) for j in 1:20]\n insert!(imgs, 1, first(imgs)) # otherwise loop isn\'t smooth\n fnames = map(imgs) do img\n tpath = tempname() * \".png\"\n save(tpath, img)\n tpath\n end\n args = reduce(vcat, [[fname, \"-delay\", \"1x4\", \"-alpha\", \"deactivate\"] for fname in fnames])\n convert = strip(readstring(`which convert`))\n outname = joinpath(\"assets\", \"idx_mnist_$i.gif\")\n run(`$convert $args $outname`)\n push!(md_imgs, \"[![mnist $i]($outname)](@ref mnist)\")\n foreach(fname -> rm(fname), fnames)\nend\nMarkdown.parse(join(md_imgs, \" \"))The Julia version of Augmentor is engineered specifically for high performance applications. It makes use of multiple heuristics to generate efficient tailor-made code for the concrete user-specified augmentation pipeline. In particular Augmentor tries to avoid the need for any intermediate images, but instead aims to compute the output image directly from the input in one single pass." -}, - -{ - "location": "#Where-to-begin?-1", - "page": "Home", - "title": "Where to begin?", - "category": "section", - "text": "If this is the first time you consider using Augmentor.jl for your machine learning related experiments or packages, make sure to check out the \"Getting Started\" section. There we list the installation instructions and some simple hello world examples.Pages = [\"gettingstarted.md\"]\nDepth = 2Augmentor.jl is the Julia package for Augmentor. You can find the Python version here." 
-}, - -{ - "location": "#Introduction-and-Motivation-1", - "page": "Home", - "title": "Introduction and Motivation", - "category": "section", - "text": "If you are new to image augmentation in general, or are simply interested in some background information, feel free to take a look at the following sections. There we discuss the concepts involved and outline the most important terms and definitions.Pages = [\"background.md\"]\nDepth = 2In case you have not worked with image data in Julia before, feel free to browse the following documents for a crash course on how image data is represented in the Julia language, as well as how to visualize it. For more information on image processing in Julia, take a look at the documentation for the vast JuliaImages ecosystem.Pages = [\"images.md\"]\nDepth = 2" -}, - -{ - "location": "#User\'s-Guide-1", - "page": "Home", - "title": "User\'s Guide", - "category": "section", - "text": "As the name suggests, Augmentor was designed with image augmentation for machine learning in mind. That said, the way the library is implemented allows it to also be used for efficient image processing outside the machine learning domain.The following section describes the high-level user interface in detail. In particular it focuses on how a (stochastic) image-processing pipeline can be defined and then be applied to an image (or a set of images). It also discusses how batch processing of multiple images can be performed in parallel using multi-threading.Pages = [\"interface.md\"]\nDepth = 2We mentioned before that an augmentation pipeline is just a sequence of image operations. Augmentor ships with a number of predefined operations, which should be sufficient to describe the most commonly utilized augmentation strategies. Each operation is represented as its own unique type. The following section provides a complete list of all the exported operations and their documentation.Pages = [\"operations.md\"]\nDepth = 2" -}, - -{ - "location": "#Tutorials-1", - "page": "Home", - "title": "Tutorials", - "category": "section", - "text": "Just like an image can say more than a thousand words, a simple hands-on tutorial showing actual code can say more than many pages of formal documentation.The first step of devising a successful augmentation strategy is to identify an appropriate set of operations and parameters. What that means can vary widely, because the utility of each operation depends on the dataset at hand (see label-preserving transformations for an example). To that end, we will spend the first tutorial discussing a simple but useful approach to interactively explore and visualize the space of possible parameters.Pages = [joinpath(\"generated\", \"mnist_elastic.md\")]\nDepth = 2In the next tutorials we will take a close look at how we can actually use Augmentor in combination with popular deep learning frameworks. The first framework we will discuss will be Knet. In particular we will focus on adapting an already existing example to make use of a (quite complicated) augmentation pipeline. 
Furthermore, this tutorial will also serve to showcase the various ways that augmentation can influence the performance of your network.Pages = [joinpath(\"generated\", \"mnist_knet.md\")]\nDepth = 2# Pages = [joinpath(\"generated\", fname) for fname in readdir(\"generated\") if splitext(fname)[2] == \".md\"]\n# Depth = 2" -}, - -{ - "location": "#Citing-Augmentor-1", - "page": "Home", - "title": "Citing Augmentor", - "category": "section", - "text": "If you use Augmentor for academic research and wish to cite it, please use the following paper.Marcus D. Bloice, Christof Stocker, and Andreas Holzinger, Augmentor: An Image Augmentation Library for Machine Learning, arXiv preprint arXiv:1708.04680, https://arxiv.org/abs/1708.04680, 2017." -}, - -{ - "location": "#Indices-1", - "page": "Home", - "title": "Indices", - "category": "section", - "text": "Pages = [\"indices.md\"]" -}, - -{ - "location": "gettingstarted/#", - "page": "Getting Started", - "title": "Getting Started", - "category": "page", - "text": "" -}, - -{ - "location": "gettingstarted/#Getting-Started-1", - "page": "Getting Started", - "title": "Getting Started", - "category": "section", - "text": "In this section we will provide a condensed overview of the package. In order to keep this overview concise, we will not discuss any background information or theory on the losses here in detail." -}, - -{ - "location": "gettingstarted/#Installation-1", - "page": "Getting Started", - "title": "Installation", - "category": "section", - "text": "To install Augmentor.jl, start up Julia and type the following code-snipped into the REPL. It makes use of the native Julia package manger.Pkg.add(\"Augmentor\")Additionally, for example if you encounter any sudden issues, or in the case you would like to contribute to the package, you can manually choose to be on the latest (untagged) version.Pkg.checkout(\"Augmentor\")" -}, - -{ - "location": "gettingstarted/#Example-1", - "page": "Getting Started", - "title": "Example", - "category": "section", - "text": "The following code snippet shows how a stochastic augmentation pipeline can be specified using simple building blocks that we call \"operations\". In order to give the example some meaning, we will use a real medical image from the publicly available ISIC archive as input. The concrete image can be downloaded here using their Web API.julia> using Augmentor, ISICArchive\n\njulia> img = get(ImageThumbnailRequest(id = \"5592ac599fc3c13155a57a85\"))\n169×256 Array{RGB{N0f8},2}:\n[...]\n\njulia> pl = Either(1=>FlipX(), 1=>FlipY(), 2=>NoOp()) |>\n Rotate(0:360) |>\n ShearX(-5:5) * ShearY(-5:5) |>\n CropSize(165, 165) |>\n Zoom(1:0.05:1.2) |>\n Resize(64, 64)\n6-step Augmentor.ImmutablePipeline:\n 1.) Either: (25%) Flip the X axis. (25%) Flip the Y axis. (50%) No operation.\n 2.) Rotate by θ ∈ 0:360 degree\n 3.) Either: (50%) ShearX by ϕ ∈ -5:5 degree. (50%) ShearY by ψ ∈ -5:5 degree.\n 4.) Crop a 165×165 window around the center\n 5.) Zoom by I ∈ {1.0×1.0, 1.05×1.05, 1.1×1.1, 1.15×1.15, 1.2×1.2}\n 6.) 
Resize to 64×64\n\njulia> img_new = augment(img, pl)\n64×64 Array{RGB{N0f8},2}:\n[...]using Augmentor, ISICArchive;\n\nimg = get(ImageThumbnailRequest(id = \"5592ac599fc3c13155a57a85\"))\n\npl = Either(1=>FlipX(), 1=>FlipY(), 2=>NoOp()) |>\n Rotate(0:360) |>\n ShearX(-5:5) * ShearY(-5:5) |>\n CropSize(165, 165) |>\n Zoom(1:0.05:1.2) |>\n Resize(64, 64)\n\nimg_new = augment(img, pl)\n\nusing Plots\npyplot(reuse = true)\ndefault(bg_outside=colorant\"#F3F6F6\")\nsrand(123)\n\n# Create image that shows the input\nplot(img, size=(256,169), xlim=(1,255), ylim=(1,168), grid=false, ticks=true)\nPlots.png(joinpath(\"assets\",\"isic_in.png\"))\n\n# create animate gif that shows 10 outputs\nanim = @animate for i=1:10\n plot(augment(img, pl), size=(169,169), xlim=(1,63), ylim=(1,63), grid=false, ticks=true)\nend\nPlots.gif(anim, joinpath(\"assets\",\"isic_out.gif\"), fps = 2)\n\nnothingThe function augment will generate a single augmented image from the given input image and pipeline. To visualize the effect we compiled a few resulting output images into a GIF using the plotting library Plots.jl with the PyPlot.jl back-end. You can inspect the full code by clicking on \"Edit on Github\" in the top right corner of this page.Input (img) Output (img_new)\n(Image: input) → (Image: output)" -}, - -{ - "location": "gettingstarted/#Getting-Help-1", - "page": "Getting Started", - "title": "Getting Help", - "category": "section", - "text": "To get help on specific functionality you can either look up the information here, or if you prefer you can make use of Julia\'s native doc-system. The following example shows how to get additional information on augment within Julia\'s REPL:?augmentIf you find yourself stuck or have other questions concerning the package you can find us at gitter or the Machine Learning domain on discourse.julialang.orgJulia ML on Gitter\nMachine Learning on JulialangIf you encounter a bug or would like to participate in the development of this package come find us on Github.Evizero/Augmentor.jl" -}, - -{ - "location": "background/#", - "page": "Background and Motivation", - "title": "Background and Motivation", - "category": "page", - "text": "" -}, - -{ - "location": "background/#Background-and-Motivation-1", - "page": "Background and Motivation", - "title": "Background and Motivation", - "category": "section", - "text": "In this section we will discuss the concept of image augmentation in general. In particular we will introduce some terminology and useful definitions." -}, - -{ - "location": "background/#What-is-Image-Augmentation?-1", - "page": "Background and Motivation", - "title": "What is Image Augmentation?", - "category": "section", - "text": "The term data augmentation is commonly used to describe the process of repeatedly applying various transformations to some dataset, with the hope that the output (i.e. the newly generated observations) bias the model towards learning better features. Depending on the structure and semantics of the data, coming up with such transformations can be a challenge by itself.Images are a special class of data that exhibit some interesting properties in respect to their structure. For example do the dimensions of an image (i.e. the pixel) exhibit a spatial relationship to each other. As such, a lot of commonly used augmentation strategies for image data revolve around affine transformations, such as translations or rotations. 
Because images are such a popular and special case of data, they deserve their own sub-category of data augmentation, which we will unsurprisingly refer to as image augmentation.The general idea is the following: if we want our model to generalize well, then we should design the learning process in such a way as to bias the model into learning such transformation-equivariant properties. One way to do this is via the design of the model itself, which for example was idea behind convolutional neural networks. An orthogonal approach to bias the model to learn about this equivariance - and the focus of this package - is by using label-preserving transformations." -}, - -{ - "location": "background/#labelpreserving-1", - "page": "Background and Motivation", - "title": "Label-preserving Transformations", - "category": "section", - "text": "Before attempting to train a model using some augmentation pipeline, it\'s a good idea to invest some time in deciding on an appropriate set of transformations to choose from. Some of these transformations also have parameters to tune, and we should also make sure that we settle on a decent set of values for those.What constitutes as \"decent\" depends on the dataset. In general we want the augmented images to be fairly dissimilar to the originals. However, we need to be careful that the augmented images still visually represent the same concept (and thus label). If a pipeline only produces output images that have this property we call this pipeline label-preserving." -}, - -{ - "location": "background/#mnist-1", - "page": "Background and Motivation", - "title": "Example: MNIST Handwritten Digits", - "category": "section", - "text": "Consider the following example from the MNIST database of handwritten digits [MNIST1998]. Our input image clearly represents its associated label \"6\". If we were to use the transformation Rotate180 in our augmentation pipeline for this type of images, we could end up with the situation depicted by the image on the right side.using Augmentor, MLDatasets\ninput_img = MNIST.convert2image(MNIST.traintensor(19))\noutput_img = augment(input_img, Rotate180())\nusing Images, FileIO; # hide\nupsize(A) = repeat(A, inner=(4,4)); # hide\nsave(joinpath(\"assets\",\"bg_mnist_in.png\"), upsize(input_img)); # hide\nsave(joinpath(\"assets\",\"bg_mnist_out.png\"), upsize(output_img)); # hide\nnothing # hideInput (input_img) Output (output_img)\n(Image: input) (Image: output)To a human, this newly transformed image clearly represents the label \"9\", and not \"6\" like the original image did. In image augmentation, however, the assumption is that the output of the pipeline has the same label as the input. That means that in this example we would tell our model that the correct answer for the image on the right side is \"6\", which is clearly undesirable for obvious reasons.Thus, for the MNIST dataset, the transformation Rotate180 is not label-preserving and should not be used for augmentation.[MNIST1998]: LeCun, Yan, Corinna Cortes, Christopher J.C. Burges. \"The MNIST database of handwritten digits\" Website. 1998." -}, - -{ - "location": "background/#Example:-ISIC-Skin-Lesions-1", - "page": "Background and Motivation", - "title": "Example: ISIC Skin Lesions", - "category": "section", - "text": "On the other hand, the exact same transformation could very well be label-preserving for other types of images. 
Let us take a look at a different set of image data; this time from the medical domain.The International Skin Imaging Collaboration [ISIC] hosts a large collection of publicly available and labeled skin lesion images. A subset of that data was used in 2016\'s ISBI challenge [ISBI2016] where a subtask was lesion classification.Let\'s consider the following input image on the left side. It shows a photo of a skin lesion that was taken from above. By applying the Rotate180 operation to the input image, we end up with a transformed version shown on the right side.using Augmentor, ISICArchive\ninput_img = get(ImageThumbnailRequest(id = \"5592ac599fc3c13155a57a85\"))\noutput_img = augment(input_img, Rotate180())\nusing FileIO; # hide\nsave(joinpath(\"assets\",\"bg_isic_in.png\"), input_img); # hide\nsave(joinpath(\"assets\",\"bg_isic_out.png\"), output_img); # hide\nnothing # hideInput (input_img) Output (output_img)\n(Image: input) (Image: output)After looking at both images, one could argue that the orientation of the camera is somewhat arbitrary as long as it points to the lesion at an approximately orthogonal angle. Thus, for the ISIC dataset, the transformation Rotate180 could be considered as label-preserving and very well be tried for augmentation. Of course this does not guarantee that it will improve training time or model accuracy, but the point is that it is unlikely to hurt.[ISIC]: https://isic-archive.com/[ISBI2016]: Gutman, David; Codella, Noel C. F.; Celebi, Emre; Helba, Brian; Marchetti, Michael; Mishra, Nabin; Halpern, Allan. \"Skin Lesion Analysis toward Melanoma Detection: A Challenge at the International Symposium on Biomedical Imaging (ISBI) 2016, hosted by the International Skin Imaging Collaboration (ISIC)\". eprint arXiv:1605.01397. 2016." -}, - -{ - "location": "images/#", - "page": "Working with Images in Julia", - "title": "Working with Images in Julia", - "category": "page", - "text": "" -}, - -{ - "location": "images/#Working-with-Images-in-Julia-1", - "page": "Working with Images in Julia", - "title": "Working with Images in Julia", - "category": "section", - "text": "The Julia language provides a rich syntax as well as large set of highly-optimized functionality for working with (multi-dimensional) arrays of what is known as \"bit types\" or compositions of such. Because of this, the language lends itself particularly well to the fairly simple idea of treating images as just plain arrays. Even though this may sound as a rather tedious low-level approach, Julia makes it possible to still allow for powerful abstraction layers without the loss of generality that usually comes with that. This is accomplished with help of Julia\'s flexible type system and multiple dispatch (both of which are beyond the scope of this tutorial).While the images-are-arrays-approach makes working with images in Julia very performant, it has also been source of confusion to new community members. This beginner\'s guide is an attempt to provide a step-by-step overview of how pixel data is handled in Julia. To get a more detailed explanation on some particular concept involved, please take a look at the documentation of the JuliaImages ecosystem." 
-}, - -{ - "location": "images/#Multi-dimensional-Arrays-1", - "page": "Working with Images in Julia", - "title": "Multi-dimensional Arrays", - "category": "section", - "text": "To wrap our heads around Julia\'s array-based treatment of images, we first need to understand what Julia arrays are and how we can work with them.note: Note\nThis section is only intended provide a simplified and thus partial overview of Julia\'s arrays capabilities in order to gain some intuition about pixel data. For a more detailed treatment of the topic please have a look at the official documentationWhenever we work with an Array in which the elements are bit-types (e.g. Int64, Float32, UInt8, etc), we can think of the array as a continuous block of memory. This is useful for many different reasons, such as cache locality and interacting with external libraries.The same block of memory can be interpreted in a number of ways. Consider the following example in which we allocate a vector (i.e. a one dimensional array) of UInt8 (i.e. bytes) with some ordered example values ranging from 1 to 6. We will think of this as our physical memory block, since it is a pretty close representation.julia> memory = [0x1, 0x2, 0x3, 0x4, 0x5, 0x6]\n6-element Array{UInt8,1}:\n 0x01\n 0x02\n 0x03\n 0x04\n 0x05\n 0x06The same block of memory could also be interpreted differently. For example we could think of this as a matrix with 3 rows and 2 columns instead (or even the other way around). The function reinterpret allows us to do just thatjulia> A = reinterpret(UInt8, memory, (3,2))\n3×2 Array{UInt8,2}:\n 0x01 0x04\n 0x02 0x05\n 0x03 0x06Note how we specified the number of rows first. This is because the Julia language follows the column-major convention for multi dimensional arrays. What this means can be observed when we compare our new matrix A with the initial vector memory and look at the element layout. Both variables are using the same underlying memory (i.e the value 0x01 is physically stored right next to the value 0x02 in our example, while 0x01 and 0x04 are quite far apart even though the matrix interpretation makes it look like they are neighbors; which they are not).tip: Tip\nA quick and dirty way to check if two variables are representing the same block of memory is by comparing the output of pointer(myvariable). Note, however, that technically this only tells you where a variable starts in memory and thus has its limitations.This idea can also be generalized for higher dimensions. For example we can think of this as a 3D array as well.julia> reinterpret(UInt8, memory, (3,1,2))\n3×1×2 Array{UInt8,3}:\n[:, :, 1] =\n 0x01\n 0x02\n 0x03\n\n[:, :, 2] =\n 0x04\n 0x05\n 0x06If you take a closer look at the dimension sizes, you can see that all we did in that example was add a new dimension of size 1, while not changing the other numbers. In fact we can add any number of practically empty dimensions, otherwise known as singleton dimensions.julia> reinterpret(UInt8, memory, (3,1,1,1,2))\n3×1×1×1×2 Array{UInt8,5}:\n[:, :, 1, 1, 1] =\n 0x01\n 0x02\n 0x03\n\n[:, :, 1, 1, 2] =\n 0x04\n 0x05\n 0x06This is a useful property to have when we are confronted with greyscale datasets that do not have a color channel, yet we still want to work with a library that expects the images to have one." 
-}, - -{ - "location": "images/#Vertical-Major-vs-Horizontal-Major-1", - "page": "Working with Images in Julia", - "title": "Vertical-Major vs Horizontal-Major", - "category": "section", - "text": "There are a number of different conventions for how to store image data into a binary format. The first question one has to address is the order in which the image dimensions are transcribed.We have seen before that Julia follows the column-major convention for its arrays, which for images would lead to the corresponding convention of being vertical-major. In the image domain, however, it is fairly common to store the pixels in a horizontal-major layout. In other words, horizontal-major means that images are stored in memory (or file) one pixel row after the other.In most cases, when working within the JuliaImages ecosystem, the images should already be in the Julia-native column major layout. If for some reason that is not the case there are two possible ways to convert the image to that format.julia> At = reinterpret(UInt8, memory, (3,2))\' # \"row-major\" layout\n2×3 Array{UInt8,2}:\n 0x01 0x02 0x03\n 0x04 0x05 0x06The first way to alter the pixel order is by using the function Base.permutedims. In contrast to what we have seen before, this function will allocate a new array and copy the values in the appropriate manner.\njulia> B = permutedims(At, (2,1))\n3×2 Array{UInt8,2}:\n 0x01 0x04\n 0x02 0x05\n 0x03 0x06\nThe second way is using the function ImageCore.permuteddimsview which results in a lazy view that does not allocate a new array but instead only computes the correct values when queried.\njulia> using ImageCore\n\njulia> C = permuteddimsview(At, (2,1))\n3×2 PermutedDimsArray(::Array{UInt8,2}, (2, 1)) with element type UInt8:\n 0x01 0x04\n 0x02 0x05\n 0x03 0x06Either way, it is in general a good idea to make sure that the array one is working with ends up in a column-major layout." -}, - -{ - "location": "images/#Reinterpreting-Elements-1", - "page": "Working with Images in Julia", - "title": "Reinterpreting Elements", - "category": "section", - "text": "Up to this point, all we talked about was how to reinterpreting or permuting the dimensional layout of some continuous memory block. If you look at the examples above you will see that all the arrays have elements of type UInt8, which just means that each element is represented by a single byte in memory.Knowing all this, we can now take the idea a step further and think about reinterpreting the element types of the array. Let us consider our original vector memory again.julia> memory = [0x1, 0x2, 0x3, 0x4, 0x5, 0x6]\n6-element Array{UInt8,1}:\n 0x01\n 0x02\n 0x03\n 0x04\n 0x05\n 0x06Note how each byte is thought of as an individual element. One thing we could do instead, is think of this memory block as a vector of 3 UInt16 elements.julia> reinterpret(UInt16, memory)\n3-element Array{UInt16,1}:\n 0x0201\n 0x0403\n 0x0605Pay attention to where our original bytes ended up. In contrast to just rearranging elements as we did before, we ended up with significantly different element values. One may ask why it would ever be practical to reinterpret a memory block like this. The one word answer to this is Colors! As we will see in the remainder of this tutorial, it turns out to be a very useful thing to do when your arrays represent pixel data." 
-}, - -{ - "location": "images/#Introduction-to-Color-Models-1", - "page": "Working with Images in Julia", - "title": "Introduction to Color Models", - "category": "section", - "text": "As we discussed before, there are a various number of conventions on how to store pixel data into a binary format. That is not only true for dimension priority, but also for color information.One way color information can differ is in the color model in which they are described in. Two famous examples for color models are RGB and HSV. They essentially define how colors are conceptually made up in terms of some components. Additionally, one can decide on how many bits to use to describe each color component. By doing so one defines the available color depth.Before we look into using the actual implementation of Julia\'s color models, let us prototype our own imperfect toy model in order to get a better understanding of what is happening under the hood.# define our toy color model\nstruct MyRGB\n r::UInt8\n b::UInt8\n g::UInt8\nendNote how we defined our new toy color model as struct. Because of this and the fact that all its components are bit types (in this case UInt8), any instantiation of our new type will be represented as a continuous block of memory as well.We can now apply our color model to our memory vector from above, and interpret the underlying memory as a vector of to MyRGB values instead.julia> reinterpret(MyRGB, memory)\n2-element Array{MyRGB,1}:\n MyRGB(0x01,0x02,0x03)\n MyRGB(0x04,0x05,0x06)Similar to the UInt16 example, we now group neighboring bytes into larger units (namely MyRGB). In contrast to the UInt16 example we are still able to access the individual components underneath. This simple toy color model already allows us to do a lot of useful things. We could define functions that work on MyRGB values in a color-space appropriate fashion. We could also define other color models and implement function to convert between them.However, our little toy color model is not yet optimal. For example it hard-codes a predefined color depth of 24 bit. We may have use-cases where we need a richer color space. One thing we could do to achieve that would be to introduce a new type in similar fashion. Still, because they have a different range of available numbers per channel (because they have a different amount of bits per channel), we would have to write a lot of specialized code to be able to appropriately handle all color models and depth.Luckily, the creators of ColorTypes.jl went a with a more generic strategy: Using parameterized types and fixed point numbers.tip: Tip\nIf you are interested in how various color models are actually designed and/or implemented in Julia, you can take a look at the ColorTypes.jl package." -}, - -{ - "location": "images/#Fixed-Point-Numbers-1", - "page": "Working with Images in Julia", - "title": "Fixed Point Numbers", - "category": "section", - "text": "The idea behind using fixed point numbers for each color component is fairly simple. No matter how many bits a component is made up of, we always want the largest possible value of the component to be equal to 1.0 and the smallest possible value to be equal to 0. 
Of course, the amount of possible intermediate numbers still depends on the number of underlying bits in the memory, but that is not much of an issue.julia> using FixedPointNumbers;\n\njulia> reinterpret(N0f8, 0xFF)\n1.0N0f8\n\njulia> reinterpret(N0f16, 0xFFFF)\n1.0N0f16Not only does this allow for simple conversion between different color depths, it also allows us to implement generic algorithms, that are completely agnostic to the utilized color depth.It is worth pointing out again, that we get all these goodies without actually changing or copying the original memory block. Remember how during this whole tutorial we have only changed the interpretation of some underlying memory, and have not had the need to copy any data so far.tip: Tip\nFor pixel data we are mainly interested in unsigned fixed point numbers, but there are others too. Check out the package FixedPointNumbers.jl for more information on fixed point numbers in general.Let us now leave our toy model behind and use the actual implementation of RGB on our example vector memory. With the first command we will interpret our data as two pixels with 8 bit per color channel, and with the second command as a single pixel of 16 bit per color channeljulia> using Colors, FixedPointNumbers;\n\njulia> reinterpret(RGB{N0f8}, memory)\n2-element Array{RGB{N0f8},1}:\n RGB{N0f8}(0.004,0.008,0.012)\n RGB{N0f8}(0.016,0.02,0.024)\n\njulia> reinterpret(RGB{N0f16}, memory)\n1-element Array{RGB{N0f16},1}:\n RGB{N0f16}(0.00783,0.01567,0.02351)Note how the values are now interpreted as floating point numbers." -}, - -{ - "location": "interface/#", - "page": "High-level Interface", - "title": "High-level Interface", - "category": "page", - "text": "" -}, - -{ - "location": "interface/#High-level-Interface-1", - "page": "High-level Interface", - "title": "High-level Interface", - "category": "section", - "text": "Integrating Augmentor into an existing project should in general not require any major changes to your code. In most cases it should break down to the three basic steps outlined below. We will spend the rest of this document investigating these in more detail.Import Augmentor into the namespace of your program.\nusing Augmentor\nDefine a (stochastic) image processing pipeline by chaining the desired operations using |> and *.\njulia> pl = FlipX() * FlipY() |> Zoom(0.9:0.1:1.2) |> CropSize(64,64)\n3-step Augmentor.ImmutablePipeline:\n 1.) Either: (50%) Flip the X axis. (50%) Flip the Y axis.\n 2.) Zoom by I ∈ {0.9×0.9, 1.0×1.0, 1.1×1.1, 1.2×1.2}\n 3.) Crop a 64×64 window around the center\nApply the pipeline to the existing image or set of images.\nimg_processed = augment(img_original, pl)Depending on the complexity of your problem, you may want to iterate between 2. and 3. to identify an appropriate pipeline. Take a look at the Elastic Distortions Tutorial for an example of how such an iterative process could look like." -}, - -{ - "location": "interface/#pipeline-1", - "page": "High-level Interface", - "title": "Defining a Pipeline", - "category": "section", - "text": "In Augmentor, a (stochastic) image-processing pipeline can be understood as a sequence of operations, for which the parameters can (but need not) be random variables. What that essentially means is that the user explicitly specifies which image operation to perform in what order. 
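Putting the three basic steps from above together, a minimal end-to-end sketch (reusing the example pipeline shown there together with the bundled testpattern image) could look like this:
using Augmentor                                                   # step 1
pl  = FlipX() * FlipY() |> Zoom(0.9:0.1:1.2) |> CropSize(64, 64)  # step 2
img = testpattern()
out = augment(img, pl)                                            # step 3
size(out)                                                         # (64, 64)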
A complete list of available operations can be found at Supported Operations.To start off with a simple example, let us assume that we want to first rotate our image(s) counter-clockwise by 14°, then crop them down to the biggest possible square, and lastly resize the image(s) to a fixed size of 64 by 64 pixels. Such a pipeline would be defined as follows:julia> pl = Rotate(14) |> CropRatio(1) |> Resize(64,64)\n3-step Augmentor.ImmutablePipeline:\n 1.) Rotate 14 degree\n 2.) Crop to 1:1 aspect ratio\n 3.) Resize to 64×64Notice that in the example above there is no room for randomness. In other words, the same input image would always result in the same output image given that pipeline. If we wish for more variation, we can do so by using a vector as our parameter instead of a single number.note: Note\nIn this subsection we will focus only on how to define a pipeline, without actually thinking too much about how to apply that pipeline to an actual image. The latter will be the main topic of the rest of this document.Say we wish to adapt our pipeline such that the rotation is a little more random. More specifically, let\'s say we want our image to be rotated by either -10°, -5°, 5°, 10°, or not at all. Other than that change, we will leave the rest of the pipeline as is.julia> pl = Rotate([-10,-5,0,5,10]) |> CropRatio(1) |> Resize(64,64)\n3-step Augmentor.ImmutablePipeline:\n 1.) Rotate by θ ∈ [-10, -5, 0, 5, 10] degree\n 2.) Crop to 1:1 aspect ratio\n 3.) Resize to 64×64Variation in the parameters is only one of the two main ways to introduce randomness to our pipeline. Additionally, one can specify that an operation should be sampled randomly from a chosen set of operations. This can be accomplished using a utility operation called Either, which has its own convenience syntax.As an example, let us assume we wish to first either mirror our image(s) horizontally, or vertically, or not at all, and then crop them down to a size of 100 by 100 pixels around the image\'s center. We can specify the \"either\" using the * operator.julia> pl = FlipX() * FlipY() * NoOp() |> CropSize(100,100)\n2-step Augmentor.ImmutablePipeline:\n 1.) Either: (33%) Flip the X axis. (33%) Flip the Y axis. (33%) No operation.\n 2.) Crop a 100×100 window around the centerIt is also possible to specify the odds for such an \"either\". For example, we may want the NoOp to be twice as likely as either of the mirroring options.julia> pl = (1=>FlipX()) * (1=>FlipY()) * (2=>NoOp()) |> CropSize(100,100)\n2-step Augmentor.ImmutablePipeline:\n 1.) Either: (25%) Flip the X axis. (25%) Flip the Y axis. (50%) No operation.\n 2.) Crop a 100×100 window around the centerNow that we know how to define a pipeline, let us think about how to apply it to an image or a set of images."
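Because the weighted Either above samples a branch anew on every call, applying the same pipeline to the same image twice will generally yield different outputs of identical size; a small sketch:
using Augmentor
pl   = (1=>FlipX()) * (1=>FlipY()) * (2=>NoOp()) |> CropSize(100,100)
img  = testpattern()
out1 = augment(img, pl)
out2 = augment(img, pl)
size(out1) == size(out2) == (100, 100)   # true; the contents may still differ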
It is thus well suited to prototype an augmentation pipeline, because it makes it easy to see what kind of effects one can achieve with it.\n\n\n\n" -}, - -{ - "location": "interface/#Loading-the-Example-Image-1", - "page": "High-level Interface", - "title": "Loading the Example Image", - "category": "section", - "text": "Augmentor ships with a custom example image, which was specifically designed for visualizing augmentation effects. It can be accessed by calling the function testpattern(). That said, doing so explicitly should rarely be necessary in practice, because most high-level functions will default to using testpattern() if no other image is specified.testpatternusing Augmentor\nimg = testpattern()\nusing Images; # hide\nsave(joinpath(\"assets\",\"big_pattern.png\"), img); # hide\nnothing # hide(Image: testpattern)" -}, - -{ - "location": "interface/#Augmentor.augment", - "page": "High-level Interface", - "title": "Augmentor.augment", - "category": "function", - "text": "augment([img], pipeline) -> out\n\nApply the operations of the given pipeline sequentially to the given image img and return the resulting image out.\n\njulia> img = testpattern();\n\njulia> out = augment(img, FlipX() |> FlipY())\n3×2 Array{Gray{N0f8},2}:\n[...]\n\nThe parameter img can either be a single image, or a tuple of multiple images. In case img is a tuple of images, its elements will be assumed to be conceptually connected. Consequently, all images in the tuple will take the exact same path through the pipeline; even when randomness is involved. This is useful for the purpose of image segmentation, for which the input and output are both images that need to be transformed exactly the same way.\n\nimg1 = testpattern()\nimg2 = Gray.(testpattern())\nout1, out2 = augment((img1, img2), FlipX() |> FlipY())\n\nThe parameter pipeline can be a Augmentor.Pipeline, a tuple of Augmentor.Operation, or a single Augmentor.Operation.\n\nimg = testpattern()\naugment(img, FlipX() |> FlipY())\naugment(img, (FlipX(), FlipY()))\naugment(img, FlipX())\n\nIf img is omitted, Augmentor will use the augmentation test image provided by the function testpattern as the input image.\n\naugment(FlipX())\n\n\n\n" -}, - -{ - "location": "interface/#Augmentor.augment!", - "page": "High-level Interface", - "title": "Augmentor.augment!", - "category": "function", - "text": "augment!(out, img, pipeline) -> out\n\nApply the operations of the given pipeline sequentially to the image img and write the resulting image into the preallocated parameter out. For convenience out is also the function\'s return-value.\n\nimg = testpattern()\nout = similar(img)\naugment!(out, img, FlipX() |> FlipY())\n\nThe parameter img can either be a single image, or a tuple of multiple images. In case img is a tuple of images, the parameter out has to be a tuple of the same length and ordering. See augment for more information.\n\nimgs = (testpattern(), Gray.(testpattern()))\nouts = (similar(imgs[1]), similar(imgs[2]))\naugment!(outs, imgs, FlipX() |> FlipY())\n\nThe parameter pipeline can be a Augmentor.Pipeline, a tuple of Augmentor.Operation, or a single Augmentor.Operation.\n\nimg = testpattern()\nout = similar(img)\naugment!(out, img, FlipX() |> FlipY())\naugment!(out, img, (FlipX(), FlipY()))\naugment!(out, img, FlipX())\n\n\n\n" -}, - -{ - "location": "interface/#Augmenting-an-Image-1", - "page": "High-level Interface", - "title": "Augmenting an Image", - "category": "section", - "text": "Once a pipeline is constructed it can be applied to an image (i.e. 
AbstractArray{<:ColorTypes.Colorant}), or even just to an array of numbers (i.e. AbstractArray{<:Number}), using the function augment.augmentWe also provide a mutating version of augment that writes the output into preallocated memory. While this function avoids allocation, it does have the caveat that the size of the output image must be known beforehand (and thus must not be random).augment!" -}, - -{ - "location": "interface/#Augmentor.augmentbatch!", - "page": "High-level Interface", - "title": "Augmentor.augmentbatch!", - "category": "function", - "text": "augmentbatch!([resource], outs, imgs, pipeline, [obsdim]) -> outs\n\nApply the operations of the given pipeline to the images in imgs and write the resulting images into outs.\n\nBoth outs and imgs have to contain the same number of images. Each of these two variables can either be in the form of a higher dimensional array, in the form of a vector of arrays for which each vector element denotes an image.\n\n# create five example observations of size 3x3\nimgs = rand(3,3,5)\n# create output arrays of appropriate shape\nouts = similar(imgs)\n# transform the batch of images\naugmentbatch!(outs, imgs, FlipX() |> FlipY())\n\nIf one (or both) of the two parameters outs and imgs is a higher dimensional array, then the optional parameter obsdim can be used specify which dimension denotes the observations (defaults to ObsDim.Last()),\n\n# create five example observations of size 3x3\nimgs = rand(5,3,3)\n# create output arrays of appropriate shape\nouts = similar(imgs)\n# transform the batch of images\naugmentbatch!(outs, imgs, FlipX() |> FlipY(), ObsDim.First())\n\nSimilar to augment!, it is also allowed for outs and imgs to both be tuples of the same length. If that is the case, then each tuple element can be in any of the forms listed above. This is useful for tasks such as image segmentation, where each observations is made up of more than one image.\n\n# create five example observations where each observation is\n# made up of two conceptually linked 3x3 arrays\nimgs = (rand(3,3,5), rand(3,3,5))\n# create output arrays of appropriate shape\nouts = similar.(imgs)\n# transform the batch of images\naugmentbatch!(outs, imgs, FlipX() |> FlipY())\n\nThe parameter pipeline can be a Augmentor.Pipeline, a tuple of Augmentor.Operation, or a single Augmentor.Operation.\n\naugmentbatch!(outs, imgs, FlipX() |> FlipY())\naugmentbatch!(outs, imgs, (FlipX(), FlipY()))\naugmentbatch!(outs, imgs, FlipX())\n\nThe optional first parameter resource can either be CPU1() (default) or CPUThreads(). In the later case the images will be augmented in parallel. For this to make sense make sure that the environment variable JULIA_NUM_THREADS is set to a reasonable number so that Threads.nthreads() is greater than 1.\n\n# transform the batch of images in parallel using multithreading\naugmentbatch!(CPUThreads(), outs, imgs, FlipX() |> FlipY())\n\n\n\n" -}, - -{ - "location": "interface/#Augmenting-Image-Batches-1", - "page": "High-level Interface", - "title": "Augmenting Image Batches", - "category": "section", - "text": "In most machine learning scenarios we will want to process a whole batch of images at once, instead of a single image at a time. For this reason we provide the function augmentbatch!, which also supports multi-threading.augmentbatch!" 
-}, - -{ - "location": "operations/#", - "page": "Supported Operations", - "title": "Supported Operations", - "category": "page", - "text": "using Augmentor, Images, Colors\nsrand(1337)\npattern = imresize(restrict(restrict(testpattern())), (60, 80))\nsave(\"assets/tiny_pattern.png\", pattern)\n# Affine Transformations\nsave(\"assets/tiny_FlipX.png\", augment(pattern, FlipX()))\nsave(\"assets/tiny_FlipY.png\", augment(pattern, FlipY()))\nsave(\"assets/tiny_Rotate90.png\", augment(pattern, Rotate90()))\nsave(\"assets/tiny_Rotate270.png\", augment(pattern, Rotate270()))\nsave(\"assets/tiny_Rotate180.png\", augment(pattern, Rotate180()))\nsave(\"assets/tiny_Rotate.png\", augment(pattern, Rotate(15)))\nsave(\"assets/tiny_ShearX.png\", augment(pattern, ShearX(10)))\nsave(\"assets/tiny_ShearY.png\", augment(pattern, ShearY(10)))\nsave(\"assets/tiny_Scale.png\", augment(pattern, Scale(0.9,1.2)))\nsave(\"assets/tiny_Zoom.png\", augment(pattern, Zoom(0.9,1.2)))\n# Distortions\nsrand(1337)\nsave(\"assets/tiny_ED1.png\", augment(pattern, ElasticDistortion(15,15,0.1)))\nsave(\"assets/tiny_ED2.png\", augment(pattern, ElasticDistortion(10,10,0.2,4,3,true)))\n# Resizing and Subsetting\nsave(\"assets/tiny_Resize.png\", augment(pattern, Resize(60,60)))\nsave(\"assets/tiny_Crop.png\", augment(pattern, Rotate(45) |> Crop(1:50,1:80)))\nsave(\"assets/tiny_CropNative.png\", augment(pattern, Rotate(45) |> CropNative(1:50,1:80)))\nsave(\"assets/tiny_CropSize.png\", augment(pattern, CropSize(20,65)))\nsave(\"assets/tiny_CropRatio.png\", augment(pattern, CropRatio(1)))\nsrand(1337)\nsave(\"assets/tiny_RCropRatio.png\", augment(pattern, RCropRatio(1)))\n# Conversion\nsave(\"assets/tiny_ConvertEltype.png\", augment(pattern, ConvertEltype(GrayA{N0f8})))\nnothing;" -}, - -{ - "location": "operations/#operations-1", - "page": "Supported Operations", - "title": "Supported Operations", - "category": "section", - "text": "Augmentor provides a wide varitey of build-in image operations. This page provides an overview of all exported operations organized by their main category. These categories are chosen because they serve some practical purpose. For example Affine Operations allow for a special optimization under the hood when chained together.tip: Tip\nClick on an image operation for more details." -}, - -{ - "location": "operations/#Affine-Transformations-1", - "page": "Supported Operations", - "title": "Affine Transformations", - "category": "section", - "text": "A sizeable amount of the provided operations fall under the category of affine transformations. As such, they can be described using what is known as an affine map, which are inherently compose-able if chained together. However, utilizing such a affine formulation requires (costly) interpolation, which may not always be needed to achieve the desired effect. For that reason do some of the operations below also provide a special purpose implementation to produce their specified result. Those are usually preferred over the affine formulation if sensible considering the complete pipeline.Input FlipX FlipY Rotate90 Rotate270 Rotate180\n(Image: ) → (Image: ) (Image: ) (Image: ) (Image: ) (Image: )\nInput Rotate ShearX ShearY Scale Zoom\n(Image: ) → (Image: ) (Image: ) (Image: ) (Image: ) (Image: )" -}, - -{ - "location": "operations/#Distortions-1", - "page": "Supported Operations", - "title": "Distortions", - "category": "section", - "text": "Aside from affine transformations, Augmentor also provides functionality for performing a variety of distortions. 
These types of operations usually provide a much larger distribution of possible output images.Input ElasticDistortion\n(Image: ) → (Image: )" -}, - -{ - "location": "operations/#Resizing-and-Subsetting-1", - "page": "Supported Operations", - "title": "Resizing and Subsetting", - "category": "section", - "text": "The input images from a given dataset can be of various shapes and sizes. Yet, it is often required by the algorithm that the data must be of uniform structure. To that end Augmentor provides a number of ways to alter or subset given images.Input Resize\n(Image: ) → (Image: )The process of cropping is useful to discard parts of the input image. To provide this functionality lazily, applying a crop introduces a layer of representation called a \"view\" or SubArray. This is different yet compatible with how affine operations or other special purpose implementations work. This means that chaining a crop with some affine operation is perfectly fine if done sequentially. However, it is generally not advised to combine affine operations with crop operations within an Either block. Doing that would force the Either to trigger the eager computation of its branches in order to preserve type-stability.Input Crop CropNative CropSize CropRatio RCropRatio\n(Image: ) → (Image: ) (Image: ) (Image: ) (Image: ) (Image: )" -}, - -{ - "location": "operations/#Element-wise-Transformations-and-Layout-1", - "page": "Supported Operations", - "title": "Element-wise Transformations and Layout", - "category": "section", - "text": "It is not uncommon that machine learning frameworks require the data in a specific form and layout. For example many deep learning frameworks expect the colorchannel of the images to be encoded in the third dimension of a 4-dimensional array. Augmentor allows to convert from (and to) these different layouts using special operations that are mainly useful in the beginning or end of a augmentation pipeline.Category Available Operations\nConversion ConvertEltype (e.g. convert to grayscale)\nMapping MapFun, AggregateThenMapFun\nInformation Layout SplitChannels, CombineChannels, PermuteDims, Reshape" -}, - -{ - "location": "operations/#Utility-Operations-1", - "page": "Supported Operations", - "title": "Utility Operations", - "category": "section", - "text": "Aside from \"true\" operations that specify some kind of transformation, there are also a couple of special utility operations used for functionality such as stochastic branching.Category Available Operations\nUtility Operations NoOp, CacheImage, Either" -}, - -{ - "location": "operations/flipx/#", - "page": "FlipX: Mirror horizontally", - "title": "FlipX: Mirror horizontally", - "category": "page", - "text": "" -}, - -{ - "location": "operations/flipx/#Augmentor.FlipX", - "page": "FlipX: Mirror horizontally", - "title": "Augmentor.FlipX", - "category": "type", - "text": "FlipX <: Augmentor.AffineOperation\n\nDescription\n\nReverses the x-order of each pixel row. Another way of describing it would be that it mirrors the image on the y-axis, or that it mirrors the image horizontally.\n\nIf created using the parameter p, the operation will be lifted into Either(p=>FlipX(), 1-p=>NoOp()), where p denotes the probability of applying FlipX and 1-p the probability for applying NoOp. See the documentation of Either for more information.\n\nUsage\n\nFlipX()\n\nFlipX(p)\n\nArguments\n\np::Number : Optional. Probability of applying the operation. 
Must be in the interval [0,1].\n\nSee also\n\nFlipY, Either, augment\n\nExamples\n\njulia> using Augmentor\n\njulia> img = [200 150; 50 1]\n2×2 Array{Int64,2}:\n 200 150\n 50 1\n\njulia> img_new = augment(img, FlipX())\n2×2 Array{Int64,2}:\n 150 200\n 1 50\n\n\n\n" -}, - -{ - "location": "operations/flipx/#FlipX-1", - "page": "FlipX: Mirror horizontally", - "title": "FlipX: Mirror horizontally", - "category": "section", - "text": "FlipXinclude(\"optable.jl\")\n@optable FlipX()" -}, - -{ - "location": "operations/flipy/#", - "page": "FlipY: Mirror vertically", - "title": "FlipY: Mirror vertically", - "category": "page", - "text": "" -}, - -{ - "location": "operations/flipy/#Augmentor.FlipY", - "page": "FlipY: Mirror vertically", - "title": "Augmentor.FlipY", - "category": "type", - "text": "FlipY <: Augmentor.AffineOperation\n\nDescription\n\nReverses the y-order of each pixel column. Another way of describing it would be that it mirrors the image on the x-axis, or that it mirrors the image vertically.\n\nIf created using the parameter p, the operation will be lifted into Either(p=>FlipY(), 1-p=>NoOp()), where p denotes the probability of applying FlipY and 1-p the probability for applying NoOp. See the documentation of Either for more information.\n\nUsage\n\nFlipY()\n\nFlipY(p)\n\nArguments\n\np::Number : Optional. Probability of applying the operation. Must be in the interval [0,1].\n\nSee also\n\nFlipX, Either, augment\n\nExamples\n\njulia> using Augmentor\n\njulia> img = [200 150; 50 1]\n2×2 Array{Int64,2}:\n 200 150\n 50 1\n\njulia> img_new = augment(img, FlipY())\n2×2 Array{Int64,2}:\n 50 1\n 200 150\n\n\n\n" -}, - -{ - "location": "operations/flipy/#FlipY-1", - "page": "FlipY: Mirror vertically", - "title": "FlipY: Mirror vertically", - "category": "section", - "text": "FlipYinclude(\"optable.jl\")\n@optable FlipY()" -}, - -{ - "location": "operations/rotate90/#", - "page": "Rotate90: Rotate upwards 90 degree", - "title": "Rotate90: Rotate upwards 90 degree", - "category": "page", - "text": "" -}, - -{ - "location": "operations/rotate90/#Augmentor.Rotate90", - "page": "Rotate90: Rotate upwards 90 degree", - "title": "Augmentor.Rotate90", - "category": "type", - "text": "Rotate90 <: Augmentor.AffineOperation\n\nDescription\n\nRotates the image upwards 90 degrees. This is a special case rotation because it can be performed very efficiently by simply rearranging the existing pixels. However, it is generally not the case that the output image will have the same size as the input image, which is something to be aware of.\n\nIf created using the parameter p, the operation will be lifted into Either(p=>Rotate90(), 1-p=>NoOp()), where p denotes the probability of applying Rotate90 and 1-p the probability for applying NoOp. See the documentation of Either for more information.\n\nUsage\n\nRotate90()\n\nRotate90(p)\n\nArguments\n\np::Number : Optional. Probability of applying the operation. 
Must be in the interval [0,1].\n\nSee also\n\nRotate180, Rotate270, Rotate, Either, augment\n\nExamples\n\njulia> using Augmentor\n\njulia> img = [200 150; 50 1]\n2×2 Array{Int64,2}:\n 200 150\n 50 1\n\njulia> img_new = augment(img, Rotate90())\n2×2 Array{Int64,2}:\n 150 1\n 200 50\n\n\n\n" -}, - -{ - "location": "operations/rotate90/#Rotate90-1", - "page": "Rotate90: Rotate upwards 90 degree", - "title": "Rotate90: Rotate upwards 90 degree", - "category": "section", - "text": "Rotate90include(\"optable.jl\")\n@optable Rotate90()" -}, - -{ - "location": "operations/rotate270/#", - "page": "Rotate270: Rotate downwards 90 degree", - "title": "Rotate270: Rotate downwards 90 degree", - "category": "page", - "text": "" -}, - -{ - "location": "operations/rotate270/#Augmentor.Rotate270", - "page": "Rotate270: Rotate downwards 90 degree", - "title": "Augmentor.Rotate270", - "category": "type", - "text": "Rotate270 <: Augmentor.AffineOperation\n\nDescription\n\nRotates the image upwards 270 degrees, which can also be described as rotating the image downwards 90 degrees. This is a special case rotation, because it can be performed very efficiently by simply rearranging the existing pixels. However, it is generally not the case that the output image will have the same size as the input image, which is something to be aware of.\n\nIf created using the parameter p, the operation will be lifted into Either(p=>Rotate270(), 1-p=>NoOp()), where p denotes the probability of applying Rotate270 and 1-p the probability for applying NoOp. See the documentation of Either for more information.\n\nUsage\n\nRotate270()\n\nRotate270(p)\n\nArguments\n\np::Number : Optional. Probability of applying the operation. Must be in the interval [0,1].\n\nSee also\n\nRotate90, Rotate180, Rotate, Either, augment\n\nExamples\n\njulia> using Augmentor\n\njulia> img = [200 150; 50 1]\n2×2 Array{Int64,2}:\n 200 150\n 50 1\n\njulia> img_new = augment(img, Rotate270())\n2×2 Array{Int64,2}:\n 50 200\n 1 150\n\n\n\n" -}, - -{ - "location": "operations/rotate270/#Rotate270-1", - "page": "Rotate270: Rotate downwards 90 degree", - "title": "Rotate270: Rotate downwards 90 degree", - "category": "section", - "text": "Rotate270include(\"optable.jl\")\n@optable Rotate270()" -}, - -{ - "location": "operations/rotate180/#", - "page": "Rotate180: Rotate by 180 degree", - "title": "Rotate180: Rotate by 180 degree", - "category": "page", - "text": "" -}, - -{ - "location": "operations/rotate180/#Augmentor.Rotate180", - "page": "Rotate180: Rotate by 180 degree", - "title": "Augmentor.Rotate180", - "category": "type", - "text": "Rotate180 <: Augmentor.AffineOperation\n\nDescription\n\nRotates the image 180 degrees. This is a special case rotation because it can be performed very efficiently by simply rearranging the existing pixels. Furthermore, the output image will have the same dimensions as the input image.\n\nIf created using the parameter p, the operation will be lifted into Either(p=>Rotate180(), 1-p=>NoOp()), where p denotes the probability of applying Rotate180 and 1-p the probability for applying NoOp. See the documentation of Either for more information.\n\nUsage\n\nRotate180()\n\nRotate180(p)\n\nArguments\n\np::Number : Optional. Probability of applying the operation. 
Must be in the interval [0,1].\n\nSee also\n\nRotate90, Rotate270, Rotate, Either, augment\n\nExamples\n\njulia> using Augmentor\n\njulia> img = [200 150; 50 1]\n2×2 Array{Int64,2}:\n 200 150\n 50 1\n\njulia> img_new = augment(img, Rotate180())\n2×2 Array{Int64,2}:\n 1 50\n 150 200\n\n\n\n" -}, - -{ - "location": "operations/rotate180/#Rotate180-1", - "page": "Rotate180: Rotate by 180 degree", - "title": "Rotate180: Rotate by 180 degree", - "category": "section", - "text": "Rotate180include(\"optable.jl\")\n@optable Rotate180()" -}, - -{ - "location": "operations/rotate/#", - "page": "Rotate: Arbitrary rotations", - "title": "Rotate: Arbitrary rotations", - "category": "page", - "text": "" -}, - -{ - "location": "operations/rotate/#Augmentor.Rotate", - "page": "Rotate: Arbitrary rotations", - "title": "Augmentor.Rotate", - "category": "type", - "text": "Rotate <: Augmentor.AffineOperation\n\nDescription\n\nRotate the image upwards for the given degree. This operation can only be performed as an affine transformation and will in general cause other operations of the pipeline to use their affine formulation as well (if they have one).\n\nIn contrast to the special case rotations (e.g. Rotate90, the type Rotate can describe any arbitrary number of degrees. It will always perform the rotation around the center of the image. This can be particularly useful when combining the operation with CropNative.\n\nUsage\n\nRotate(degree)\n\nArguments\n\ndegree : Real or AbstractVector of Real that denote the rotation angle(s) in degree. If a vector is provided, then a random element will be sampled each time the operation is applied.\n\nSee also\n\nRotate90, Rotate180, Rotate270, CropNative, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# rotate exactly 45 degree\naugment(img, Rotate(45))\n\n# rotate between 10 and 20 degree upwards\naugment(img, Rotate(10:20))\n\n# rotate one of the five specified degrees\naugment(img, Rotate([-10, -5, 0, 5, 10]))\n\n\n\n" -}, - -{ - "location": "operations/rotate/#Rotate-1", - "page": "Rotate: Arbitrary rotations", - "title": "Rotate: Arbitrary rotations", - "category": "section", - "text": "RotateIn contrast to the special case rotations outlined above, the type Rotate can describe any arbitrary number of degrees. It will always perform the rotation around the center of the image. This can be particularly useful when combining the operation with CropNative.include(\"optable.jl\")\n@optable Rotate(15)It is also possible to pass some abstract vector to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.include(\"optable.jl\")\n@optable 10 => Rotate(-10:10)" -}, - -{ - "location": "operations/shearx/#", - "page": "ShearX: Shear horizontally", - "title": "ShearX: Shear horizontally", - "category": "page", - "text": "" -}, - -{ - "location": "operations/shearx/#Augmentor.ShearX", - "page": "ShearX: Shear horizontally", - "title": "Augmentor.ShearX", - "category": "type", - "text": "ShearX <: Augmentor.AffineOperation\n\nDescription\n\nShear the image horizontally for the given degree. This operation can only be performed as an affine transformation and will in general cause other operations of the pipeline to use their affine formulation as well (if they have one).\n\nIt will always perform the transformation around the center of the image. 
This can be particularly useful when combining the operation with CropNative.\n\nUsage\n\nShearX(degree)\n\nArguments\n\ndegree : Real or AbstractVector of Real that denote the shearing angle(s) in degree. If a vector is provided, then a random element will be sampled each time the operation is applied.\n\nSee also\n\nShearY, CropNative, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# shear horizontally exactly 5 degree\naugment(img, ShearX(5))\n\n# shear horizontally between 10 and 20 degree to the right\naugment(img, ShearX(10:20))\n\n# shear horizontally one of the five specified degrees\naugment(img, ShearX([-10, -5, 0, 5, 10]))\n\n\n\n" -}, - -{ - "location": "operations/shearx/#ShearX-1", - "page": "ShearX: Shear horizontally", - "title": "ShearX: Shear horizontally", - "category": "section", - "text": "ShearXIt will always perform the transformation around the center of the image. This can be particularly useful when combining the operation with CropNative.include(\"optable.jl\")\n@optable ShearX(10)It is also possible to pass some abstract vector to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.include(\"optable.jl\")\n@optable 10 => ShearX(-10:10)" -}, - -{ - "location": "operations/sheary/#", - "page": "ShearY: Shear vertically", - "title": "ShearY: Shear vertically", - "category": "page", - "text": "" -}, - -{ - "location": "operations/sheary/#Augmentor.ShearY", - "page": "ShearY: Shear vertically", - "title": "Augmentor.ShearY", - "category": "type", - "text": "ShearY <: Augmentor.AffineOperation\n\nDescription\n\nShear the image vertically for the given degree. This operation can only be performed as an affine transformation and will in general cause other operations of the pipeline to use their affine formulation as well (if they have one).\n\nIt will always perform the transformation around the center of the image. This can be particularly useful when combining the operation with CropNative.\n\nUsage\n\nShearY(degree)\n\nArguments\n\ndegree : Real or AbstractVector of Real that denote the shearing angle(s) in degree. If a vector is provided, then a random element will be sampled each time the operation is applied.\n\nSee also\n\nShearX, CropNative, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# shear vertically exactly 5 degree\naugment(img, ShearY(5))\n\n# shear vertically between 10 and 20 degree upwards\naugment(img, ShearY(10:20))\n\n# shear vertically one of the five specified degrees\naugment(img, ShearY([-10, -5, 0, 5, 10]))\n\n\n\n" -}, - -{ - "location": "operations/sheary/#ShearY-1", - "page": "ShearY: Shear vertically", - "title": "ShearY: Shear vertically", - "category": "section", - "text": "ShearYIt will always perform the transformation around the center of the image. 
This can be particularly useful when combining the operation with CropNative.include(\"optable.jl\")\n@optable ShearY(10)It is also possible to pass some abstract vector to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.include(\"optable.jl\")\n@optable 10 => ShearY(-10:10)" -}, - -{ - "location": "operations/scale/#", - "page": "Scale: Relative resizing", - "title": "Scale: Relative resizing", - "category": "page", - "text": "" -}, - -{ - "location": "operations/scale/#Augmentor.Scale", - "page": "Scale: Relative resizing", - "title": "Augmentor.Scale", - "category": "type", - "text": "Scale <: Augmentor.AffineOperation\n\nDescription\n\nMultiplies the image height and image width by the specified factors. This means that the size of the output image depends on the size of the input image.\n\nThe provided factors can either be numbers or vectors of numbers.\n\nIf numbers are provided, then the operation is deterministic and will always scale the input image with the same factors.\nIn the case vectors are provided, then each time the operation is applied a valid index is sampled and the elements corresponding to that index are used as scaling factors.\n\nThe scaling is performed relative to the image center, which can be useful when following the operation with CropNative.\n\nUsage\n\nScale(factors)\n\nScale(factors...)\n\nArguments\n\nfactors : NTuple or Vararg of Real or AbstractVector that denote the scale factor(s) for each array dimension. If only one variable is specified it is assumed that height and width should be scaled by the same factor(s).\n\nSee also\n\nZoom, Resize, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# half the image size\naugment(img, Scale(0.5))\n\n# uniformly scale by a random factor from 1.2, 1.3, or 1.4\naugment(img, Scale([1.2, 1.3, 1.4]))\n\n# scale by either 0.5x0.7 or by 0.6x0.8\naugment(img, Scale([0.5, 0.6], [0.7, 0.8]))\n\n\n\n" -}, - -{ - "location": "operations/scale/#Scale-1", - "page": "Scale: Relative resizing", - "title": "Scale: Relative resizing", - "category": "section", - "text": "Scaleinclude(\"optable.jl\")\n@optable Scale(0.9,0.5)In the case that only a single scale factor is specified, the operation will assume that the intention is to scale all dimensions uniformly by that factor.include(\"optable.jl\")\n@optable Scale(1.2)It is also possible to pass some abstract vector(s) to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.include(\"optable.jl\")\n@optable 10 => Scale(0.9:0.05:1.2)" -}, - -{ - "location": "operations/zoom/#", - "page": "Zoom: Scale without resize", - "title": "Zoom: Scale without resize", - "category": "page", - "text": "" -}, - -{ - "location": "operations/zoom/#Augmentor.Zoom", - "page": "Zoom: Scale without resize", - "title": "Augmentor.Zoom", - "category": "type", - "text": "Zoom <: Augmentor.ImageOperation\n\nDescription\n\nScales the image height and image width by the specified factors, but crops the image such that the original size is preserved.\n\nThe provided factors can either be numbers or vectors of numbers.\n\nIf numbers are provided, then the operation is deterministic and will always scale the input image with the same factors.\nIn the case vectors are provided, then each time the operation is applied a valid index is sampled and the elements corresponding to that index are used as scaling factors.\n\nIn contrast to Scale the size of the output 
image is the same as the size of the input image, while the content is scaled the same way. The same effect could be achieved by following a Scale with a CropSize, with the caveat that one would need to know the exact size of the input image beforehand.\n\nUsage\n\nZoom(factors)\n\nZoom(factors...)\n\nArguments\n\nfactors : NTuple or Vararg of Real or AbstractVector that denote the scale factor(s) for each array dimension. If only one variable is specified, it is assumed that height and width should be scaled by the same factor(s).\n\nSee also\n\nScale, Resize, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# half the image size\naugment(img, Zoom(0.5))\n\n# uniformly scale by a random factor from 1.2, 1.3, or 1.4\naugment(img, Zoom([1.2, 1.3, 1.4]))\n\n# scale by either 0.5x0.7 or by 0.6x0.8\naugment(img, Zoom([0.5, 0.6], [0.7, 0.8]))\n\n\n\n" -}, - -{ - "location": "operations/zoom/#Zoom-1", - "page": "Zoom: Scale without resize", - "title": "Zoom: Scale without resize", - "category": "section", - "text": "Zoominclude(\"optable.jl\")\n@optable Zoom(1.2)It is also possible to pass some abstract vector to the constructor, in which case Augmentor will randomly sample one of its elements every time the operation is applied.include(\"optable.jl\")\n@optable 10 => Zoom(0.9:0.05:1.3)" -}, - -{ - "location": "operations/elasticdistortion/#", - "page": "ElasticDistortion: Smoothed random distortions", - "title": "ElasticDistortion: Smoothed random distortions", - "category": "page", - "text": "" -}, - -{ - "location": "operations/elasticdistortion/#Augmentor.ElasticDistortion", - "page": "ElasticDistortion: Smoothed random distortions", - "title": "Augmentor.ElasticDistortion", - "category": "type", - "text": "ElasticDistortion <: Augmentor.ImageOperation\n\nDescription\n\nDistorts the given image using a randomly (uniform) generated vector field of the given grid size. This field will be stretched over the given image when applied, which in turn will morph the original image into a new image using a linear interpolation of both the image and the vector field.\n\nIn contrast to [RandomDistortion], the resulting vector field is also smoothed using a Gaussian filter with parameter sigma. This will result in a less chaotic vector field and thus resemble a more natural distortion.\n\nUsage\n\nElasticDistortion(gridheight, gridwidth, scale, sigma, [iter=1], [border=false], [norm=true])\n\nElasticDistortion(gridheight, gridwidth, scale; [sigma=2], [iter=1], [border=false], [norm=true])\n\nElasticDistortion(gridheight, [gridwidth]; [scale=0.2], [sigma=2], [iter=1], [border=false], [norm=true])\n\nArguments\n\ngridheight : The grid height of the displacement vector field. This effectively specifies the number of vertices along the Y dimension used as landmarks, where all the positions between the grid points are interpolated.\ngridwidth : The grid width of the displacement vector field. This effectively specifies the number of vertices along the X dimension used as landmarks, where all the positions between the grid points are interpolated.\nscale : Optional. The scaling factor applied to all displacement vectors in the field. This effectively defines the \"strength\" of the deformation. There is no theoretical upper limit to this factor, but a value somewhere between 0.01 and 1.0 seems to be the most reasonable choice. Defaults to 0.2.\nsigma : Optional. Sigma parameter of the Gaussian filter. This parameter effectively controls the strength of the smoothing. 
Defaults to 2.\niter : Optional. The number of times the smoothing operation is applied to the displacement vector field. This is especially useful if border = false because the border will be reset to zero after each pass. Thus the displacement is a little less aggressive towards the borders of the image than it is towards its center. Defaults to 1.\nborder : Optional. Specifies if the borders should be distorted as well. If false, the borders of the image will be preserved. This effectively pins the outermost vertices on their original position and the operation thus only distorts the inner content of the image. Defaults to false.\nnorm : Optional. If true, the displacement vectors of the field will be normalized by the norm of the field. This will have the effect that the scale factor should be more or less independent of the grid size. Defaults to true.\n\nSee also\n\naugment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# distort with pinned borders\naugment(img, ElasticDistortion(15, 15; scale = 0.1))\n\n# distort everything more smoothly.\naugment(img, ElasticDistortion(10, 10; sigma = 4, iter=3, border=true))\n\n\n\n" -}, - -{ - "location": "operations/elasticdistortion/#ElasticDistortion-1", - "page": "ElasticDistortion: Smoothed random distortions", - "title": "ElasticDistortion: Smoothed random distortions", - "category": "section", - "text": "ElasticDistortioninclude(\"optable.jl\")\n@optable 10 => ElasticDistortion(15,15,0.1)include(\"optable.jl\")\n@optable 10 => ElasticDistortion(10,10,0.2,4,3,true)" -}, - -{ - "location": "operations/crop/#", - "page": "Crop: Subset image", - "title": "Crop: Subset image", - "category": "page", - "text": "" -}, - -{ - "location": "operations/crop/#Augmentor.Crop", - "page": "Crop: Subset image", - "title": "Augmentor.Crop", - "category": "type", - "text": "Crop <: Augmentor.ImageOperation\n\nDescription\n\nCrops out the area denoted by the specified pixel ranges.\n\nFor example the operation Crop(5:100, 2:10) would denote a crop for the rectangle that starts at x=2 and y=5 in the top left corner and ends at x=10 and y=100 in the bottom right corner. As we can see the y-axis is specified first, because that is how the image is stored in an array. Thus the order of the provided indices ranges needs to reflect the order of the array dimensions.\n\nUsage\n\nCrop(indices)\n\nCrop(indices...)\n\nArguments\n\nindices : NTuple or Vararg of UnitRange that denote the cropping range for each array dimension. 
This is very similar to how the indices for view are specified.\n\nSee also\n\nCropNative, CropSize, CropRatio, augment\n\nExamples\n\njulia> using Augmentor\n\njulia> img = testpattern()\n300×400 Array{RGBA{N0f8},2}:\n[...]\n\njulia> augment(img, Crop(1:30, 361:400)) # crop upper right corner\n30×40 Array{RGBA{N0f8},2}:\n[...]\n\n\n\n" -}, - -{ - "location": "operations/crop/#Crop-1", - "page": "Crop: Subset image", - "title": "Crop: Subset image", - "category": "section", - "text": "Cropinclude(\"optable.jl\")\n@optable Crop(70:140,25:155)" -}, - -{ - "location": "operations/cropnative/#", - "page": "CropNative: Subset image", - "title": "CropNative: Subset image", - "category": "page", - "text": "" -}, - -{ - "location": "operations/cropnative/#Augmentor.CropNative", - "page": "CropNative: Subset image", - "title": "Augmentor.CropNative", - "category": "type", - "text": "CropNative <: Augmentor.ImageOperation\n\nDescription\n\nCrops out the area denoted by the specified pixel ranges.\n\nFor example the operation CropNative(5:100, 2:10) would denote a crop for the rectangle that starts at x=2 and y=5 in the top left corner of native space and ends at x=10 and y=100 in the bottom right corner of native space.\n\nIn contrast to Crop, the position x=1 y=1 is not necessarily located at the top left of the current image, but instead depends on the cumulative effect of the previous transformations. The reason for this is because affine transformations are usually performed around the center of the image, which is reflected in \"native space\". This is useful for combining transformations such as Rotate or ShearX with a crop around the center area.\n\nUsage\n\nCropNative(indices)\n\nCropNative(indices...)\n\nArguments\n\nindices : NTuple or Vararg of UnitRange that denote the cropping range for each array dimension. 
This is very similar to how the indices for view are specified.\n\nSee also\n\nCrop, CropSize, CropRatio, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# cropped at top left corner\naugment(img, Rotate(45) |> Crop(1:300, 1:400))\n\n# cropped around center of rotated image\naugment(img, Rotate(45) |> CropNative(1:300, 1:400))\n\n\n\n" -}, - -{ - "location": "operations/cropnative/#CropNative-1", - "page": "CropNative: Subset image", - "title": "CropNative: Subset image", - "category": "section", - "text": "CropNativeinclude(\"optable.jl\")\n@optable \"cropn1\" => (Rotate(45),Crop(1:210,1:280))\n@optable \"cropn2\" => (Rotate(45),CropNative(1:210,1:280))\ntbl = string(\n \"`(Rotate(45), Crop(1:210,1:280))` | `(Rotate(45), CropNative(1:210,1:280))`\\n\",\n \"-----|-----\\n\",\n \"![input](../assets/cropn1.png) | ![output](../assets/cropn2.png)\\n\"\n)\nMarkdown.parse(tbl)" -}, - -{ - "location": "operations/cropsize/#", - "page": "CropSize: Crop centered window", - "title": "CropSize: Crop centered window", - "category": "page", - "text": "" -}, - -{ - "location": "operations/cropsize/#Augmentor.CropSize", - "page": "CropSize: Crop centered window", - "title": "Augmentor.CropSize", - "category": "type", - "text": "CropSize <: Augmentor.ImageOperation\n\nDescription\n\nCrops out the area of the specified pixel size around the center of the input image.\n\nFor example the operation CropSize(10, 50) would denote a crop for a rectangle of height 10 and width 50 around the center of the input image.\n\nUsage\n\nCropSize(size)\n\nCropSize(size...)\n\nArguments\n\nsize : NTuple or Vararg of Int that denote the output size in pixel for each dimension.\n\nSee also\n\nCropRatio, Crop, CropNative, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# cropped around center of rotated image\naugment(img, Rotate(45) |> CropSize(300, 400))\n\n\n\n" -}, - -{ - "location": "operations/cropsize/#CropSize-1", - "page": "CropSize: Crop centered window", - "title": "CropSize: Crop centered window", - "category": "section", - "text": "CropSizeinclude(\"optable.jl\")\n@optable CropSize(45,225)" -}, - -{ - "location": "operations/cropratio/#", - "page": "CropRatio: Crop centered window", - "title": "CropRatio: Crop centered window", - "category": "page", - "text": "" -}, - -{ - "location": "operations/cropratio/#Augmentor.CropRatio", - "page": "CropRatio: Crop centered window", - "title": "Augmentor.CropRatio", - "category": "type", - "text": "CropRatio <: Augmentor.ImageOperation\n\nDescription\n\nCrops out the biggest area around the center of the given image such that the output image satisfies the specified aspect ratio (i.e. width divided by height).\n\nFor example the operation CropRatio(1) would denote a crop for the biggest square around the center of the image.\n\nFor randomly placed crops take a look at RCropRatio.\n\nUsage\n\nCropRatio(ratio)\n\nCropRatio(; ratio = 1)\n\nArguments\n\nratio::Number : Optional. A number denoting the aspect ratio. For example specifying ratio=16/9 would denote a 16:9 aspect ratio. 
Defaults to 1, which describes a square crop.\n\nSee also\n\nRCropRatio, CropSize, Crop, CropNative, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# crop biggest square around the image center\naugment(img, CropRatio(1))\n\n\n\n" -}, - -{ - "location": "operations/cropratio/#CropRatio-1", - "page": "CropRatio: Crop centered window", - "title": "CropRatio: Crop centered window", - "category": "section", - "text": "CropRatioinclude(\"optable.jl\")\n@optable CropRatio(1)" -}, - -{ - "location": "operations/rcropratio/#", - "page": "RCropRatio: Crop random window", - "title": "RCropRatio: Crop random window", - "category": "page", - "text": "" -}, - -{ - "location": "operations/rcropratio/#Augmentor.RCropRatio", - "page": "RCropRatio: Crop random window", - "title": "Augmentor.RCropRatio", - "category": "type", - "text": "RCropRatio <: Augmentor.ImageOperation\n\nDescription\n\nCrops out the biggest possible area at some random position of the given image, such that the output image satisfies the specified aspect ratio (i.e. width divided by height).\n\nFor example the operation RCropRatio(1) would denote a crop for the biggest possible square. If there is more than one such square, then one will be selected at random.\n\nUsage\n\nRCropRatio(ratio)\n\nRCropRatio(; ratio = 1)\n\nArguments\n\nratio::Number : Optional. A number denoting the aspect ratio. For example specifying ratio=16/9 would denote a 16:9 aspect ratio. Defaults to 1, which describes a square crop.\n\nSee also\n\nCropRatio, CropSize, Crop, CropNative, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# crop a randomly placed square of maxmimum size\naugment(img, RCropRatio(1))\n\n\n\n" -}, - -{ - "location": "operations/rcropratio/#RCropRatio-1", - "page": "RCropRatio: Crop random window", - "title": "RCropRatio: Crop random window", - "category": "section", - "text": "RCropRatioinclude(\"optable.jl\")\n@optable 10 => RCropRatio(1)" -}, - -{ - "location": "operations/resize/#", - "page": "Resize: Set static image size", - "title": "Resize: Set static image size", - "category": "page", - "text": "" -}, - -{ - "location": "operations/resize/#Augmentor.Resize", - "page": "Resize: Set static image size", - "title": "Augmentor.Resize", - "category": "type", - "text": "Resize <: Augmentor.ImageOperation\n\nDescription\n\nRescales the image to a fixed pre-specified pixel size.\n\nThis operation does not take any measures to preserve aspect ratio of the source image. Instead, the original image will simply be resized to the given dimensions. 
This is useful when one needs a set of images to all be of the exact same size.\n\nUsage\n\nResize(; height=64, width=64)\n\nResize(size)\n\nResize(size...)\n\nArguments\n\nsize : NTuple or Vararg of Int that denote the output size in pixel for each dimension.\n\nSee also\n\nCropSize, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\naugment(img, Resize(30, 40))\n\n\n\n" -}, - -{ - "location": "operations/resize/#Resize-1", - "page": "Resize: Set static image size", - "title": "Resize: Set static image size", - "category": "section", - "text": "Resizeinclude(\"optable.jl\")\n@optable Resize(100,150)" -}, - -{ - "location": "operations/converteltype/#", - "page": "ConvertEltype: Color conversion", - "title": "ConvertEltype: Color conversion", - "category": "page", - "text": "" -}, - -{ - "location": "operations/converteltype/#Augmentor.ConvertEltype", - "page": "ConvertEltype: Color conversion", - "title": "Augmentor.ConvertEltype", - "category": "type", - "text": "ConvertEltype <: Augmentor.Operation\n\nDescription\n\nConvert the element type of the given array/image into the given eltype. This operation is especially useful for converting color images to grayscale (or the other way around). That said, the operation is not specific to color types and can also be used for numeric arrays (e.g. with separated channels).\n\nNote that this is an element-wise convert function. Thus it can not be used to combine or separate color channels. Use SplitChannels or CombineChannels for those purposes.\n\nUsage\n\nConvertEltype(eltype)\n\nArguments\n\neltype : The eltype of the resulting array/image.\n\nSee also\n\nCombineChannels, SplitChannels, augment\n\nExamples\n\njulia> using Augmentor, Colors\n\njulia> A = rand(RGB, 10, 10) # three color channels\n10×10 Array{RGB{Float64},2}:\n[...]\n\njulia> augment(A, ConvertEltype(Gray{Float32})) # convert to grayscale\n10×10 Array{Gray{Float32},2}:\n[...]\n\n\n\n" -}, - -{ - "location": "operations/converteltype/#ConvertEltype-1", - "page": "ConvertEltype: Color conversion", - "title": "ConvertEltype: Color conversion", - "category": "section", - "text": "ConvertEltypeinclude(\"optable.jl\")\n@optable ConvertEltype(GrayA{N0f8})" -}, - -{ - "location": "operations/mapfun/#", - "page": "MapFun: Map function over Image", - "title": "MapFun: Map function over Image", - "category": "page", - "text": "" -}, - -{ - "location": "operations/mapfun/#Augmentor.MapFun", - "page": "MapFun: Map function over Image", - "title": "Augmentor.MapFun", - "category": "type", - "text": "MapFun <: Augmentor.Operation\n\nDescription\n\nMaps the given function over all individual array elements.\n\nThis means that the given function is called with an individual elements and is expected to return a transformed element that should take the original\'s place. This further implies that the function is expected to be unary. 
The function is encouraged to be type-stable and consistent in its return type.\n\nUsage\n\nMapFun(fun)\n\nArguments\n\nfun : The unary function that should be mapped over all individual array elements.\n\nSee also\n\nAggregateThenMapFun, ConvertEltype, augment\n\nExamples\n\nusing Augmentor, ColorTypes\nimg = testpattern()\n\n# subtract the constant RGBA value from each pixel\naugment(img, MapFun(px -> px - RGBA(0.5, 0.3, 0.7, 0.0)))\n\n# separate channels to scale each numeric element by a constant value\npl = SplitChannels() |> MapFun(el -> el * 0.5) |> CombineChannels(RGBA)\naugment(img, pl)\n\n\n\n" -}, - -{ - "location": "operations/mapfun/#MapFun-1", - "page": "MapFun: Map function over Image", - "title": "MapFun: Map function over Image", - "category": "section", - "text": "MapFun" -}, - -{ - "location": "operations/aggmapfun/#", - "page": "AggregateThenMapFun: Aggregate and Map over Image", - "title": "AggregateThenMapFun: Aggregate and Map over Image", - "category": "page", - "text": "" -}, - -{ - "location": "operations/aggmapfun/#Augmentor.AggregateThenMapFun", - "page": "AggregateThenMapFun: Aggregate and Map over Image", - "title": "Augmentor.AggregateThenMapFun", - "category": "type", - "text": "AggregateThenMapFun <: Augmentor.Operation\n\nDescription\n\nCompute some aggregated value of the current image using the given function aggfun, and map that value over the current image using the given function mapfun.\n\nThis is particularly useful for achieving effects such as per-image normalization.\n\nUsage\n\nAggregateThenMapFun(aggfun, mapfun)\n\nArguments\n\naggfun : A function that takes the whole current image as input and whose result will also be passed to mapfun. It should have a signature of img -> agg, where img will be the current image. What type and value agg should be is up to the user.\nmapfun : The binary function that should be mapped over all individual array elements. It should have a signature of (px, agg) -> new_px where px is a single element of the current image, and agg is the output of aggfun.\n\nSee also\n\nMapFun, ConvertEltype, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# subtract the average RGB value of the current image\naugment(img, AggregateThenMapFun(img -> mean(img), (px, agg) -> px - agg))\n\n\n\n" -}, - -{ - "location": "operations/aggmapfun/#AggregateThenMapFun-1", - "page": "AggregateThenMapFun: Aggregate and Map over Image", - "title": "AggregateThenMapFun: Aggregate and Map over Image", - "category": "section", - "text": "AggregateThenMapFun" -}, - -{ - "location": "operations/splitchannels/#", - "page": "SplitChannels: Separate color channels", - "title": "SplitChannels: Separate color channels", - "category": "page", - "text": "" -}, - -{ - "location": "operations/splitchannels/#Augmentor.SplitChannels", - "page": "SplitChannels: Separate color channels", - "title": "Augmentor.SplitChannels", - "category": "type", - "text": "SplitChannels <: Augmentor.Operation\n\nDescription\n\nSplits out the color channels of the given image using the function ImageCore.channelview. This will effectively create a new array dimension for the colors in the front. 
In contrast to ImageCore.channelview it will also result in a new dimension for gray images.\n\nThis operation is mainly useful at the end of a pipeline in combination with PermuteDims in order to prepare the image for the training algorithm, which often requires the color channels to be separate.\n\nUsage\n\nSplitChannels()\n\nSee also\n\nPermuteDims, CombineChannels, augment\n\nExamples\n\njulia> using Augmentor\n\njulia> img = testpattern()\n300×400 Array{RGBA{N0f8},2}:\n[...]\n\njulia> augment(img, SplitChannels())\n4×300×400 Array{N0f8,3}:\n[...]\n\njulia> augment(img, SplitChannels() |> PermuteDims(3,2,1))\n400×300×4 Array{N0f8,3}:\n[...]\n\n\n\n" -}, - -{ - "location": "operations/splitchannels/#SplitChannels-1", - "page": "SplitChannels: Separate color channels", - "title": "SplitChannels: Separate color channels", - "category": "section", - "text": "SplitChannels" -}, - -{ - "location": "operations/combinechannels/#", - "page": "CombineChannels: Combine color channels", - "title": "CombineChannels: Combine color channels", - "category": "page", - "text": "" -}, - -{ - "location": "operations/combinechannels/#Augmentor.CombineChannels", - "page": "CombineChannels: Combine color channels", - "title": "Augmentor.CombineChannels", - "category": "type", - "text": "CombineChannels <: Augmentor.Operation\n\nDescription\n\nCombines the first dimension of a given array into a colorant of type colortype using the function ImageCore.colorview. The main difference to ImageCore.colorview is that a separate color channel is also expected for Gray images.\n\nThe shape of the input image has to be appropriate for the given colortype, which also means that the separated color channel has to be the first dimension of the array. See PermuteDims if that is not the case.\n\nUsage\n\nCombineChannels(colortype)\n\nArguments\n\ncolortype : The color type of the resulting image. Must be a subtype of ColorTypes.Colorant and match the color channel of the given image.\n\nSee also\n\nSplitChannels, PermuteDims, augment\n\nExamples\n\njulia> using Augmentor, Colors\n\njulia> A = rand(3, 10, 10) # three color channels\n3×10×10 Array{Float64,3}:\n[...]\n\njulia> augment(A, CombineChannels(RGB))\n10×10 Array{RGB{Float64},2}:\n[...]\n\njulia> B = rand(1, 10, 10) # singleton color channel\n1×10×10 Array{Float64,3}:\n[...]\n\njulia> augment(B, CombineChannels(Gray))\n10×10 Array{Gray{Float64},2}:\n[...]\n\n\n\n" -}, - -{ - "location": "operations/combinechannels/#CombineChannels-1", - "page": "CombineChannels: Combine color channels", - "title": "CombineChannels: Combine color channels", - "category": "section", - "text": "CombineChannels" -}, - -{ - "location": "operations/permutedims/#", - "page": "PermuteDims: Change dimension order", - "title": "PermuteDims: Change dimension order", - "category": "page", - "text": "" -}, - -{ - "location": "operations/permutedims/#Augmentor.PermuteDims", - "page": "PermuteDims: Change dimension order", - "title": "Augmentor.PermuteDims", - "category": "type", - "text": "PermuteDims <: Augmentor.Operation\n\nDescription\n\nPermute the dimensions of the given array with the predefined permutation perm. This operation is particularly useful if the order of the dimensions needs to be different than the default \"julian\" layout (described below).\n\nAugmentor expects the given images to be in vertical-major layout for which the colors are encoded in the element type itself. Many deep learning frameworks however require their input in a different order. 
For example it is not untypical that separate color channels are expected to be encoded in the third dimension.\n\nUsage\n\nPermuteDims(perm)\n\nPermuteDims(perm...)\n\nArguments\n\nperm : The concrete dimension permutation that should be used. Has to be specified as a Vararg{Int} or as a NTuple of Int. The length of perm has to match the number of dimensions of the expected input image to that operation.\n\nSee also\n\nSplitChannels, CombineChannels, augment\n\nExamples\n\njulia> using Augmentor, Colors\n\njulia> A = rand(10, 5, 3) # width=10, height=5, and 3 color channels\n10×5×3 Array{Float64,3}:\n[...]\n\njulia> img = augment(A, PermuteDims(3,2,1) |> CombineChannels(RGB))\n5×10 Array{RGB{Float64},2}:\n[...]\n\njulia> img2 = testpattern()\n300×400 Array{RGBA{N0f8},2}:\n[...]\n\njulia> B = augment(img2, SplitChannels() |> PermuteDims(3,2,1))\n400×300×4 Array{N0f8,3}:\n[...]\n\n\n\n" -}, - -{ - "location": "operations/permutedims/#PermuteDims-1", - "page": "PermuteDims: Change dimension order", - "title": "PermuteDims: Change dimension order", - "category": "section", - "text": "PermuteDims" -}, - -{ - "location": "operations/reshape/#", - "page": "Reshape: Reinterpret shape", - "title": "Reshape: Reinterpret shape", - "category": "page", - "text": "" -}, - -{ - "location": "operations/reshape/#Augmentor.Reshape", - "page": "Reshape: Reinterpret shape", - "title": "Augmentor.Reshape", - "category": "type", - "text": "Reshape <: Augmentor.Operation\n\nDescription\n\nReinterpret the shape of the given array of numbers or colorants. This is useful for example to create singleton-dimensions that deep learning frameworks may need for colorless images, or for converting an image array to a feature vector (and vice versa).\n\nUsage\n\nReshape(dims)\n\nReshape(dims...)\n\nArguments\n\ndims : The new sizes for each dimension of the output image. 
Has to be specified as a Vararg{Int} or as a NTuple of Int.\n\nSee also\n\nCombineChannels, augment\n\nExamples\n\njulia> using Augmentor, Colors\n\njulia> A = rand(10,10)\n10×10 Array{Float64,2}:\n[...]\n\njulia> augment(A, Reshape(10,10,1)) # add trailing singleton dimension\n10×10×1 Array{Float64,3}:\n[...]\n\n\n\n" -}, - -{ - "location": "operations/reshape/#Reshape-1", - "page": "Reshape: Reinterpret shape", - "title": "Reshape: Reinterpret shape", - "category": "section", - "text": "Reshape" -}, - -{ - "location": "operations/noop/#", - "page": "NoOp: Identity function", - "title": "NoOp: Identity function", - "category": "page", - "text": "" -}, - -{ - "location": "operations/noop/#Augmentor.NoOp", - "page": "NoOp: Identity function", - "title": "Augmentor.NoOp", - "category": "type", - "text": "NoOp <: Augmentor.AffineOperation\n\nIdentity transformation that does not do anything with the given image, but instead passes it along unchanged (without copying).\n\nUsually used in combination with Either to denote a \"branch\" that does not perform any computation.\n\n\n\n" -}, - -{ - "location": "operations/noop/#NoOp-1", - "page": "NoOp: Identity function", - "title": "NoOp: Identity function", - "category": "section", - "text": "NoOp" -}, - -{ - "location": "operations/cacheimage/#", - "page": "CacheImage: Buffer current state", - "title": "CacheImage: Buffer current state", - "category": "page", - "text": "" -}, - -{ - "location": "operations/cacheimage/#Augmentor.CacheImage", - "page": "CacheImage: Buffer current state", - "title": "Augmentor.CacheImage", - "category": "type", - "text": "CacheImage <: Augmentor.ImageOperation\n\nDescription\n\nWrite the current state of the image into the working memory. Optionally a user has the option to specify a preallocated buffer to write the image into. Note that if a buffer is provided, then it has to be of the correct size and eltype.\n\nEven without a preallocated buffer it can be beneficial in some situations to cache the image. An example for such a scenario is when chaining a number of affine transformations after an elastic distortion, because performing that lazily requires nested interpolation.\n\nUsage\n\nCacheImage()\n\nCacheImage(buffer)\n\nArguments\n\nbuffer : Optional. A preallocated AbstractArray of the appropriate size and eltype.\n\nSee also\n\naugment\n\nExamples\n\nusing Augmentor\n\n# make pipeline that forces caching after elastic distortion\npl = ElasticDistortion(3,3) |> CacheImage() |> Rotate(-10:10) |> ShearX(-5:5)\n\n# cache output of elastic distortion into the allocated\n# 20x20 Matrix{Float64}. 
Note that for this case this assumes that\n# the input image is also a 20x20 Matrix{Float64}\npl = ElasticDistortion(3,3) |> CacheImage(zeros(20,20)) |> Rotate(-10:10)\n\n# convenience syntax with the same effect as above.\npl = ElasticDistortion(3,3) |> zeros(20,20) |> Rotate(-10:10)\n\n\n\n" -}, - -{ - "location": "operations/cacheimage/#CacheImage-1", - "page": "CacheImage: Buffer current state", - "title": "CacheImage: Buffer current state", - "category": "section", - "text": "CacheImage" -}, - -{ - "location": "operations/either/#", - "page": "Either: Stochastic branches", - "title": "Either: Stochastic branches", - "category": "page", - "text": "" -}, - -{ - "location": "operations/either/#Augmentor.Either", - "page": "Either: Stochastic branches", - "title": "Augmentor.Either", - "category": "type", - "text": "Either <: Augmentor.ImageOperation\n\nDescription\n\nChooses between the given operations at random when applied. This is particularly useful if one for example wants to first either rotate the image 90 degree clockwise or anticlockwise (but never both), and then apply some other operation(s) afterwards.\n\nWhen compiling a pipeline, Either will analyze the provided operations in order to identify the preferred formalism to use when applied. The chosen formalism is chosen such that it is supported by all given operations. This way the output of applying Either will be inferable and the whole pipeline will remain type-stable (even though randomness is involved).\n\nBy default each specified image operation has the same probability of occurrence. This default behaviour can be overwritten by specifying the chance manually.\n\nUsage\n\nEither(operations, [chances])\n\nEither(operations...; [chances])\n\nEither(pairs...)\n\n*(operations...)\n\n*(pairs...)\n\nArguments\n\noperations : NTuple or Vararg of Augmentor.ImageOperation that denote the possible choices to sample from when applied.\nchances : Optional. Denotes the relative chances for an operation to be sampled. Has to contain the same number of elements as operations. Either an NTuple of numbers if specified as positional argument, or alternatively a AbstractVector of numbers if specified as a keyword argument. If omitted every operation will have equal probability of occurring.\npairs : Vararg of Pair{<:Real,<:Augmentor.ImageOperation}. 
A compact way to specify an operation and its chance of occurring together.\n\nSee also\n\nNoOp, augment\n\nExamples\n\nusing Augmentor\nimg = testpattern()\n\n# all three operations have equal chance of occurring\naugment(img, Either(FlipX(), FlipY(), NoOp()))\naugment(img, FlipX() * FlipY() * NoOp())\n\n# NoOp is twice as likely as either FlipX or FlipY\naugment(img, Either(1=>FlipX(), 1=>FlipY(), 2=>NoOp()))\naugment(img, Either(FlipX(), FlipY(), NoOp(), chances=[1,1,2]))\naugment(img, Either((FlipX(), FlipY(), NoOp()), (1,1,2)))\naugment(img, (1=>FlipX()) * (1=>FlipY()) * (2=>NoOp()))\n\n\n\n" -}, - -{ - "location": "operations/either/#Either-1", - "page": "Either: Stochastic branches", - "title": "Either: Stochastic branches", - "category": "section", - "text": "Either" -}, - -{ - "location": "generated/mnist_elastic/#", - "page": "MNIST: Elastic Distortions", - "title": "MNIST: Elastic Distortions", - "category": "page", - "text": "" -}, - -{ - "location": "generated/mnist_elastic/#elastic-1", - "page": "MNIST: Elastic Distortions", - "title": "MNIST: Elastic Distortions", - "category": "section", - "text": "In this example we are going to use Augmentor on the famous MNIST database of handwritten digits [MNIST1998] to reproduce the elastic distortions discussed in [SIMARD2003]. It may be interesting to point out that the way Augmentor implements distortions is a little different from how it is described by the authors of the paper. This is for a couple of reasons, most notably that we want the parameters for our deformations to be independent of the size of the image they are applied to. As a consequence, the parameter values specified in the paper do not transfer 1-to-1 to Augmentor.If the effects are sensible for the dataset, then applying elastic distortions can be a really effective way to improve the generalization ability of the network. That said, our implementation of ElasticDistortion has a lot of possible parameters to choose from. To that end, we will introduce a simple strategy for interactively exploring the parameter space on our dataset of interest.note: Note\nThis tutorial was designed to be performed in a Jupyter notebook. You can find a link to the Jupyter version of this tutorial in the top right corner of this page." -}, - -{ - "location": "generated/mnist_elastic/#Loading-the-MNIST-Trainingset-1", - "page": "MNIST: Elastic Distortions", - "title": "Loading the MNIST Trainingset", - "category": "section", - "text": "In order to access and visualize the MNIST images we employ the help of two additional Julia packages. In the interest of time and space we will not go into great detail about their functionality. Feel free to click on their respective names to find out more information about the utility they can provide.Images.jl will provide us with the necessary tools for working with image data in Julia.\nMLDatasets.jl has an MNIST submodule that offers a convenience interface to read the MNIST database.The function MNIST.traintensor returns the MNIST training images corresponding to the given indices as a multi-dimensional array. These images are stored in the native horizontal-major memory layout as a single floating point array, where all values are scaled to be between 0.0 and 1.0.using Images, MLDatasets\ntrain_tensor = MNIST.traintensor()\n@show summary(train_tensor);\nnothing # hideThis horizontal-major format is the standard way of utilizing this dataset for training machine learning models. 
In this tutorial, however, we are more interested in working with the MNIST images as actual Julia images in vertical-major layout, and as black digits on white background.We can convert the \"tensor\" to a Colorant array using the provided function MNIST.convert2image. This way, Julia knows we are dealing with image data and can tell programming environments such as Jupyter how to visualize it. If you are working in the terminal you may want to use the package ImageInTerminal.jltrain_images = MNIST.convert2image(train_tensor)\nimg_1 = train_images[:,:,1] # show first image\nsave(\"mnist_1.png\",repeat(img_1,inner=(4,4))) # hide\nnothing # hide(Image: first image)" -}, - -{ - "location": "generated/mnist_elastic/#Visualizing-the-Effects-1", - "page": "MNIST: Elastic Distortions", - "title": "Visualizing the Effects", - "category": "section", - "text": "Before applying an operation (or pipeline of operations) on some dataset to train a network, we strongly recommend investing some time in selecting a decent set of hyperparameters for the operation(s). A useful tool for tasks like this is the package Interact.jl. We will use this package to define a number of widgets for controlling the parameters to our operation.Note that while the code below only focuses on configuring the parameters of a single operation, specifically ElasticDistortion, it could also be adapted to tweak a whole pipeline. Take a look at the corresponding section in High-level Interface for more information on how to define and use a pipeline.# These two packages will provide us with the capabilities\n# to perform interactive visualisations in a jupyter notebook\nusing Augmentor, Interact, Reactive\n\n# The manipulate macro will turn the parameters of the\n# loop into interactive widgets.\n@manipulate for\n unpaused = true,\n ticks = fpswhen(signal(unpaused), 5.),\n image_index = 1:100,\n grid_size = 3:20,\n scale = .1:.1:.5,\n sigma = 1:5,\n iterations = 1:6,\n free_border = true\n op = ElasticDistortion(grid_size, grid_size, # equal width & height\n sigma = sigma,\n scale = scale,\n iter = iterations,\n border = free_border)\n augment(train_images[:, :, image_index], op)\nend\nnothing # hideExecuting the code above in a Jupyter notebook will result in the following interactive visualisation. You can now use the sliders to investigate the effects that different parameters have on the MNIST training images.tip: Tip\nYou should always use your training set to do this kind of visualisation (not the test set!). Otherwise you are likely to achieve overly optimistic (i.e. biased) results during training.(Image: interact)Congratulations! With just a few lines of code, you have created a simple interactive tool to visualize your image augmentation pipeline. Once you have found a set of parameters that you think are appropriate for your dataset you can go ahead and train your model." -}, - -{ - "location": "generated/mnist_elastic/#References-1", - "page": "MNIST: Elastic Distortions", - "title": "References", - "category": "section", - "text": "[MNIST1998]: LeCun, Yann, Corinna Cortes, Christopher J.C. Burges. \"The MNIST database of handwritten digits.\" Website. 1998.[SIMARD2003]: Simard, Patrice Y., David Steinkraus, and John C. Platt. \"Best practices for convolutional neural networks applied to visual document analysis.\" ICDAR. Vol. 3. 2003." 
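The interactive approach above requires a Jupyter notebook with Interact.jl installed. As a quick non-interactive alternative, the following sketch renders a handful of parameter combinations side by side. It assumes the train_images array loaded earlier in this tutorial; the concrete grid sizes and scales below are only illustrative, not recommendations.

using Augmentor, Images

img = train_images[:, :, 1]
settings = [(3, 0.2), (8, 0.3), (15, 0.4)]  # illustrative (grid size, scale) pairs to compare

previews = map(settings) do setting
    gridsize, scale = setting
    # same keyword arguments as used in the interactive widget above
    op = ElasticDistortion(gridsize, gridsize, sigma = 3, scale = scale, iter = 2)
    augment(img, op)
end

hcat(previews...)  # lay the previews out next to each other for visual comparison

Because every call to augment samples a new random distortion, re-running the snippet gives a fresh set of previews for the same parameter values.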
-}, - -{ - "location": "generated/mnist_knet/#", - "page": "MNIST: Knet.jl CNN", - "title": "MNIST: Knet.jl CNN", - "category": "page", - "text": "" -}, - -{ - "location": "generated/mnist_knet/#MNIST:-Knet.jl-CNN-1", - "page": "MNIST: Knet.jl CNN", - "title": "MNIST: Knet.jl CNN", - "category": "section", - "text": "In this tutorial we will adapt the MNIST example from Knet.jl to utilize a custom augmentation pipeline. In order to showcase the effect that image augmentation can have on a neural network\'s ability to generalize, we will limit the training set to just the first 500 images (of the available 60,000!). For more information on the dataset see [MNIST1998].note: Note\nThis tutorial is also available as a Jupyter notebook. You can find a link to the Jupyter version of this tutorial in the top right corner of this page." -}, - -{ - "location": "generated/mnist_knet/#Preparing-the-MNIST-dataset-1", - "page": "MNIST: Knet.jl CNN", - "title": "Preparing the MNIST dataset", - "category": "section", - "text": "In order to access, prepare, and visualize the MNIST images we employ the help of three additional Julia packages. In the interest of time and space we will not go into great detail about their functionality. Feel free to click on their respective names to find out more information about the utility they can provide.MLDatasets.jl has an MNIST submodule that offers a convenience interface to read the MNIST database.\nImages.jl will provide us with the necessary tools to process and display the image data in Julia / Jupyter.\nMLDataUtils.jl implements a variety of functions to convert and partition machine learning datasets. This will help us prepare the MNIST data to be used with Knet.jl.using Images, MLDatasets, MLDataUtils\nsrand(42);\nnothing # hideAs you may have seen previously in the elastic distortions tutorial, the function MNIST.traintensor returns the MNIST training images corresponding to the given indices as a multi-dimensional array. These images are stored in the native horizontal-major memory layout as a single array. Because we specify that the eltype of that array should be Float32, all the individual values are scaled to be between 0.0 and 1.0. Also note how the observations are laid out along the last array dimension@show summary(MNIST.traintensor(Float32, 1:500));\nnothing # hideThe corresponding label of each image is stored as an integer value between 0 and 9. That means that if the label has the value 3, then the corresponding image is known to be a handwritten \"3\". To show a more concrete example, the following code reveals that the first training image denotes a \"5\" and the second training image a \"0\" (etc).@show summary(MNIST.trainlabels(1:500))\nprintln(\"First eight labels: \", join(MNIST.trainlabels(1:8),\", \"))For Knet we will require a slightly different format for the images and also the labels. More specifically, we add an additional singleton dimension of length 1 to our image array. Think of this as our single color channel (because MNIST images are gray). Additionally we will convert our labels to proper 1-based indices. This is because some functions provided by Knet expect the labels to be in this format. 
We will do all this by creating a little utility function that we will name prepare_mnist.\"\"\"\n prepare_mnist(images, labels) -> (X, Y)\n\nChange the dimension layout x1×x2×N of the given array\n`images` to x1×x2×1×N and return the result as `X`.\nThe given integer vector `labels` is transformed into\nan integer vector denoting 1-based class indices.\n\"\"\"\nfunction prepare_mnist(images, labels)\n X = reshape(images, (28, 28, 1, :))\n Y = convertlabel(LabelEnc.Indices{Int8}, labels, 0:9)\n X, Y\nend\nnothing # hideWith prepare_mnist defined, we can now use it in conjunction with the functions in the MLDatasets.MNIST sub-module to load and prepare our training set. Recall that for this tutorial only the first 500 images of the training set will be used.train_x, train_y = prepare_mnist(MNIST.traintensor(Float32, 1:500), MNIST.trainlabels(1:500))\n@show summary(train_x) summary(train_y);\n[MNIST.convert2image(train_x[:,:,1,i]) for i in 1:8]\ntmp = hcat(ans...) # hide\nsave(\"mnist_knet_train.png\",repeat(tmp, inner=(4,4))) # hide\nnothing # hide(Image: training images)Similarly, we use MNIST.testtensor and MNIST.testlabels to load the full MNIST test set. We will utilize that data to measure how well the network is able to generalize with and without augmentation.test_x, test_y = prepare_mnist(MNIST.testtensor(Float32), MNIST.testlabels())\n@show summary(test_x) summary(test_y);\n[MNIST.convert2image(test_x[:,:,1,i]) for i in 1:8]\ntmp = hcat(ans...) # hide\nsave(\"mnist_knet_test.png\",repeat(tmp, inner=(4,4))) # hide\nnothing # hide(Image: test images)" -}, - -{ - "location": "generated/mnist_knet/#Defining-the-Network-1", - "page": "MNIST: Knet.jl CNN", - "title": "Defining the Network", - "category": "section", - "text": "With the dataset prepared, we can now define and instantiate our neural network. To keep things simple, we will use the same convolutional network as defined in the MNIST example of the Knet.jl package.using Knet\nnothing # hideThe first thing we will do is define the forward pass through the network. This will effectively outline the computation graph of the network architecture. Note how this does not define some details, such as the number of neurons per layer. We will define those later when initializing our vector of weight arrays w.\"\"\"\n forward(w, x) -> a\n\nCompute the forward pass for the given minibatch `x` by using the\nneural network parameters in `w`. The resulting (unnormalized)\nactivations of the last layer are returned as `a`.\n\"\"\"\nfunction forward(w, x)\n # conv1 (2x2 maxpool)\n a1 = pool(relu.(conv4(w[1], x) .+ w[2]))\n # conv2 (2x2 maxpool)\n a2 = pool(relu.(conv4(w[3], a1) .+ w[4]))\n # dense1 (relu)\n a3 = relu.(w[5] * mat(a2) .+ w[6])\n # dense2 (identity)\n a4 = w[7] * a3 .+ w[8]\n return a4\nend\nnothing # hideIn order to be able to train our network we need to choose a cost function. Because this is a classification problem we will use the negative log-likelihood (provided by Knet.nll). With the cost function defined we can then simply use the higher-order function grad to create a new function costgrad that computes the corresponding gradients.\"\"\"\n cost(w, x, y) -> AbstractFloat\n\nCompute the per-instance negative log-likelihood for the data\nin the minibatch `(x, y)` given the network with the current\nparameters in `w`.\n\"\"\"\ncost(w, x, y) = nll(forward(w, x), y)\ncostgrad = grad(cost)\nnothing # hideAside from the cost function that we need for training, we would also like a more interpretable performance measurement. 
In this tutorial we will use \"accuracy\" for its simplicity and because we know that the class distribution for MNIST is close to uniform.\"\"\"\n acc(w, X, Y; [batchsize]) -> Float64\n\nCompute the accuracy for the data in `(X,Y)` given the network\nwith the current parameters in `w`. The resulting value is\ncomputed by iterating over the data in minibatches of size\n`batchsize`.\n\"\"\"\nfunction acc(w, X, Y; batchsize = 100)\n sum = 0; count = 0\n for (x_cpu, y) in eachbatch((X, Y), maxsize = batchsize)\n x = KnetArray{Float32}(x_cpu)\n sum += Int(accuracy(forward(w,x), y, average = false))\n count += length(y)\n end\n return sum / count\nend\nnothing # hideBefore we can train or even just use our network, we need to define how we initialize w, which is our vector of parameter arrays. The dimensions of these individual arrays specify the filter sizes and number of neurons. It can be helpful to compare the indices here with the indices used in our forward function to see which array corresponds to which computation node of our network.function weights(atype = KnetArray{Float32})\n w = Array{Any}(8)\n # conv1\n w[1] = xavier(5,5,1,20)\n w[2] = zeros(1,1,20,1)\n # conv2\n w[3] = xavier(5,5,20,50)\n w[4] = zeros(1,1,50,1)\n # dense1\n w[5] = xavier(500,800)\n w[6] = zeros(500,1)\n # dense2\n w[7] = xavier(10,500)\n w[8] = zeros(10,1)\n return map(a->convert(atype,a), w)\nend\nnothing # hide" -}, - -{ - "location": "generated/mnist_knet/#Training-without-Augmentation-1", - "page": "MNIST: Knet.jl CNN", - "title": "Training without Augmentation", - "category": "section", - "text": "In order to get an intuition for how useful augmentation can be, we need a sensible baseline to compare to. To that end, we will first train the network we just defined using only the (unaltered) 500 training examples.The package ValueHistories.jl will help us record the accuracy during the training process. We will use those logs later to visualize the differences between having augmentation and no augmentation.using ValueHistoriesTo keep things simple, we will not overly optimize our training function. Thus, we will be content with using a closure. Because both the baseline and the augmented version will share this \"inefficiency\", we should still get a decent enough picture of their performance differences.function train_baseline(; epochs = 500, batchsize = 100, lr = .03)\n w = weights()\n log = MVHistory()\n for epoch in 1:epochs\n for (batch_x_cpu, batch_y) in eachbatch((train_x ,train_y), batchsize)\n batch_x = KnetArray{Float32}(batch_x_cpu)\n g = costgrad(w, batch_x, batch_y)\n Knet.update!(w, g, lr = lr)\n end\n\n if (epoch % 5) == 0\n train = acc(w, train_x, train_y)\n test = acc(w, test_x, test_y)\n @trace log epoch train test\n msg = \"epoch \" * lpad(epoch,4) * \": train accuracy \" * rpad(round(train,3),5,\"0\") * \", test accuracy \" * rpad(round(test,3),5,\"0\")\n println(msg)\n end\n end\n log\nend\nnothing # hideAside from the accuracy, we will also keep an eye on the training time. In particular we would like to see if and how the addition of augmentation causes our training time to increase.train_baseline(epochs=1) # warm-up\nbaseline_log = @time train_baseline(epochs=200);\nnothing # hideAs we can see, the accuracy on the training set is around 100%, while the accuracy on the test set peaks around 90%. For a mere 500 training examples, this isn\'t actually that bad of a result." 
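Since the accuracies are recorded in an MVHistory, they can also be inspected programmatically after training instead of only being printed. A minimal sketch, assuming ValueHistories exposes a get(history, key) accessor that returns the recorded epochs and values as two vectors:

using ValueHistories

# pull the test accuracies logged via @trace back out of the history
epochs, test_accs = get(baseline_log, :test)
println("best test accuracy:  ", round(maximum(test_accs), 3))
println("final test accuracy: ", round(test_accs[end], 3))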
-}, - -{ - "location": "generated/mnist_knet/#Integrating-Augmentor-1", - "page": "MNIST: Knet.jl CNN", - "title": "Integrating Augmentor", - "category": "section", - "text": "Now that we have a network architecture with a baseline to compare to, let us finally see what it takes to add Augmentor to our experiment. First, we need to include the package in our experiment.using AugmentorThe next step, and maybe the most human-hour consuming part of adding image augmentation to a prediction problem, is to design and select a sensible augmentation pipeline. Take a look at the elastic distortions tutorial for an example of how to do just that.For this example, we have already chosen a quite complicated but promising augmentation pipeline for you. This pipeline was designed to yield a large variation of effects as well as to showcase how even deep pipelines are quite efficient in terms of performance.pl = Reshape(28,28) |>\n PermuteDims(2,1) |>\n ShearX(-5:5) * ShearY(-5:5) |>\n Rotate(-15:15) |>\n CropSize(28,28) |>\n Zoom(0.9:0.1:1.2) |>\n CacheImage() |>\n ElasticDistortion(10) |>\n PermuteDims(2,1) |>\n Reshape(28,28,1)Most of the used operations are quite self-explanatory, but there are some details about this pipeline worth pointing out explicitly.We use the operation PermuteDims to convert the horizontal-major MNIST image to a julia-native vertical-major image. The vertical-major image is then processed and converted back to a horizontal-major array. We mainly do this here to showcase the option, but it also keeps us consistent with how the data is usually used in the literature. Alternatively, one could just work with the MNIST data in a vertical-major format all the way through without any issue.As counter-intuitive as it sounds, the operation CacheImage right before ElasticDistortion is actually used to improve performance. If we were to omit it, then the whole pipeline would be applied in one single pass. In this case, applying distortions on top of affine transformations lazily is in fact less efficient than using a temporary variable.With the pipeline now defined, let us quickly peek at what kind of effects we can achieve with it. In particular, let\'s apply the pipeline multiple times to the first training image and look at what kind of results it produces.[MNIST.convert2image(reshape(augment(train_x[:,:,:,1], pl), (28, 28))) for i in 1:8, j in 1:2]\ntmp = vcat(hcat(ans[:,1]...), hcat(ans[:,2]...)) # hide\nsave(\"mnist_knet_aug.png\",repeat(tmp, inner=(4,4))) # hide\nnothing # hide(Image: augmented samples)As we can see, we can achieve a wide range of effects, from more subtle to more pronounced. The important part is that all examples are still clearly representative of the true label.Next, we have to adapt the function train_baseline to make use of our augmentation pipeline. To integrate Augmentor efficiently, there are three necessary changes we have to make.Preallocate a buffer with the same size and element type that each batch has.\nbatch_x_aug = zeros(Float32, 28, 28, 1, batchsize)Add a call to augmentbatch! in the inner loop of the batch iterator using our pipeline and buffer.\naugmentbatch!(batch_x_aug, batch_x_org, pl)Replace batch_x_org with batch_x_aug in the constructor of KnetArray.\nbatch_x = KnetArray{Float32}(batch_x_aug)Applying these changes to our train_baseline function will give us something similar to the following function. 
Note how all the other parts of the function remain exactly the same as before.function train_augmented(; epochs = 500, batchsize = 100, lr = .03)\n w = weights()\n log = MVHistory()\n batch_x_aug = zeros(Float32, size(train_x,1), size(train_x,2), 1, batchsize)\n for epoch in 1:epochs\n for (batch_x_cpu, batch_y) in eachbatch((train_x ,train_y), batchsize)\n augmentbatch!(CPUThreads(), batch_x_aug, batch_x_cpu, pl)\n batch_x = KnetArray{Float32}(batch_x_aug)\n g = costgrad(w, batch_x, batch_y)\n Knet.update!(w, g, lr = lr)\n end\n\n if (epoch % 5) == 0\n train = acc(w, train_x, train_y)\n test = acc(w, test_x, test_y)\n @trace log epoch train test\n msg = \"epoch \" * lpad(epoch,4) * \": train accuracy \" * rpad(round(train,3),5,\"0\") * \", test accuracy \" * rpad(round(test,3),5,\"0\")\n println(msg)\n end\n end\n log\nend\nnothing # hideYou may have noticed in the code above that we also pass a CPUThreads() as the first argument to augmentbatch!. This instructs Augmentor to process the images of the batch in parallel using multi-threading. For this to work properly you will need to set the environment variable JULIA_NUM_THREADS to the number of threads you wish to use. You can check how many threads are used with the function Threads.nthreads()@show Threads.nthreads();\nnothing # hideNow that all pieces are in place, let us train our network once more. We will use the same parameters except that now instead of the original training images we will be using randomly augmented images. This will cause every epoch to be different.train_augmented(epochs=1) # warm-up\naugmented_log = @time train_augmented(epochs=200);\nnothing # hideAs we can see, our network reaches far better results on our test set than our baseline network did. However, we can also see that the training took quite a bit longer than before. This difference generally decreases as the complexity of the utilized neural network increases. Yet another way to improve performance (aside from simplifying the augmentation pipeline) would be to increase the number of available threads." -}, - -{ - "location": "generated/mnist_knet/#Improving-Performance-1", - "page": "MNIST: Knet.jl CNN", - "title": "Improving Performance", - "category": "section", - "text": "One of the most effective ways to make the most out of the available resources is to augment the next (couple of) mini-batches while the current minibatch is being processed on the GPU. We can do this via Julia\'s built-in parallel computing capabilitiesFirst we need a worker process that will be responsible for augmenting our dataset each epoch. 
This worker also needs access to a couple of our packages# addprocs(1)\n# @everywhere using Augmentor, MLDataUtilsNext, we replace the inner eachbatch loop with a more complicated version using a RemoteChannel to exchange and queue the augmented data.function async_train_augmented(; epochs = 500, batchsize = 100, lr = .03)\n w = weights()\n log = MVHistory()\n for epoch in 1:epochs\n @sync begin\n local_ch = Channel{Tuple}(4) # prepare up to 4 minibatches in advance\n remote_ch = RemoteChannel(()->local_ch)\n @spawn begin\n # This block is executed on the worker process\n batch_x_aug = zeros(Float32, size(train_x,1), size(train_x,2), 1, batchsize)\n for (batch_x_cpu, batch_y) in eachbatch((train_x ,train_y), batchsize)\n # we are still using multithreading\n augmentbatch!(CPUThreads(), batch_x_aug, batch_x_cpu, pl)\n put!(remote_ch, (batch_x_aug, batch_y))\n end\n close(remote_ch)\n end\n @async begin\n # This block is executed on the main process\n for (batch_x_aug, batch_y) in local_ch\n batch_x = KnetArray{Float32}(batch_x_aug)\n g = costgrad(w, batch_x, batch_y)\n Knet.update!(w, g, lr = lr)\n end\n end\n end\n\n if (epoch % 5) == 0\n train = acc(w, train_x, train_y)\n test = acc(w, test_x, test_y)\n @trace log epoch train test\n msg = \"epoch \" * lpad(epoch,4) * \": train accuracy \" * rpad(round(train,3),5,\"0\") * \", test accuracy \" * rpad(round(test,3),5,\"0\")\n println(msg)\n end\n end\n log\nend\nnothing # hideNote that for this toy example the overhead of this approach is greater than the benefit." -}, - -{ - "location": "generated/mnist_knet/#Visualizing-the-Results-1", - "page": "MNIST: Knet.jl CNN", - "title": "Visualizing the Results", - "category": "section", - "text": "Before we end this tutorial, let us make use of the Plots.jl package to visualize and discuss the recorded training curves. We will plot the accuracy curves of both networks side by side in order to get a good feeling about their differences.using Plots\npyplot()\nnothing # hidedefault(bg_outside=colorant\"#FFFFFF\") # hide\nplt = plot(\n plot(baseline_log, title=\"Baseline\", ylim=(.5,1)),\n plot(augmented_log, title=\"Augmented\", ylim=(.5,1)),\n size = (900, 400),\n xlab = \"Epoch\",\n ylab = \"Accuracy\",\n markersize = 1\n)\npng(plt, \"mnist_knet_curves.png\") # hide\nnothing # hide(Image: learning curves)Note how the accuracy on the (unaltered) training set increases faster for the baseline network than for the augmented one. This is to be expected, since our augmented network doesn\'t actually use the unaltered images for training, and thus has not actually seen them. Given this information, it is worth pointing out explicitly how the accuracy on the training set is still greater than on the test set for the augmented network as well. This is also not a surprise, given that the augmented images are likely more similar to their original ones than to the test images.For the baseline network, the accuracy on the test set plateaus quite quickly (around 90%). For the augmented network, on the other hand, the accuracy keeps increasing for quite a while longer." -}, - -{ - "location": "generated/mnist_knet/#References-1", - "page": "MNIST: Knet.jl CNN", - "title": "References", - "category": "section", - "text": "[MNIST1998]: LeCun, Yann, Corinna Cortes, Christopher J.C. Burges. \"The MNIST database of handwritten digits.\" Website. 1998." 
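To experiment with the augmentation step of this tutorial in isolation, one can also run the batch iteration without any network attached. A minimal sketch, assuming the train_x, train_y, and pl variables defined above and an illustrative batch size of 100:

using Augmentor, MLDataUtils

batchsize = 100
batch_x_aug = zeros(Float32, 28, 28, 1, batchsize)  # preallocated output buffer

for (batch_x_cpu, batch_y) in eachbatch((train_x, train_y), batchsize)
    # overwrite the buffer with a freshly augmented version of the current batch
    augmentbatch!(CPUThreads(), batch_x_aug, batch_x_cpu, pl)
    # here batch_x_aug would normally be handed to the network,
    # e.g. via KnetArray{Float32}(batch_x_aug) as in train_augmented
end

Timing this loop on its own (for example with @time) gives a rough idea of how much of the per-epoch cost is spent on augmentation versus on the network itself.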
-}, - -{ - "location": "indices/#", - "page": "Indices", - "title": "Indices", - "category": "page", - "text": "" -}, - -{ - "location": "indices/#Functions-1", - "page": "Indices", - "title": "Functions", - "category": "section", - "text": "Order = [:function]" -}, - -{ - "location": "indices/#Types-1", - "page": "Indices", - "title": "Types", - "category": "section", - "text": "Order = [:type]" -}, - -{ - "location": "LICENSE/#", - "page": "LICENSE", - "title": "LICENSE", - "category": "page", - "text": "" -}, - -{ - "location": "LICENSE/#LICENSE-1", - "page": "LICENSE", - "title": "LICENSE", - "category": "section", - "text": "Markdown.parse_file(joinpath(@__DIR__, \"../LICENSE.md\"))" -}, - -]}