Merge pull request #1005 from mila-udem/release-0.2
Release 0.2
rizar committed Feb 24, 2016
2 parents de526fe + 5902ba6 commit 7beb788
Showing 56 changed files with 3,780 additions and 1,189 deletions.
3 changes: 2 additions & 1 deletion .travis.yml
@@ -25,7 +25,7 @@ before_install:
   - # Setup Python environment with BLAS libraries
   - wget -q http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
   - chmod +x miniconda.sh
-  - ./miniconda.sh -b
+  - ./miniconda.sh -b -p $HOME/miniconda
   - export PATH=$HOME/miniconda/bin:$PATH
   - conda update -q --yes conda
   - export FUEL_DATA_PATH=$TRAVIS_BUILD_DIR/data
@@ -54,6 +54,7 @@ script:
       cd $TRAVIS_BUILD_DIR
       git clone https://github.com/mila-udem/blocks-examples.git
       cd blocks-examples
+      git checkout e0d7a0e5b60e802634161a63602673717c3e3c78
       nose2 tests
     fi
 after_script:
25 changes: 25 additions & 0 deletions CONTRIBUTING.rst
@@ -1,3 +1,26 @@
+Filing an issue
+===============
+If you are having a problem, then *before* filing an issue, please verify
+the following:
+
+* That you are using a **compatible version of Python** -- this means version
+  3.4 or newer for mainline Python. Legacy Python support is limited to 2.7
+  and will eventually be dropped; not all features may be available under it.
+  Users are encouraged to move to Python 3.x as soon as possible.
+* That you are using **the latest version of Theano** from the GitHub ``master``
+  branch. Blocks is developed in tandem with Theano's bleeding-edge development,
+  and many problems with using Blocks can be traced to using the latest stable
+  version of Theano (or an insufficiently recent GitHub checkout). Please see
+  the `Blocks installation instructions`_ for more details.
+* That you are using the latest Blocks (and Fuel_) from the GitHub ``master``
+  branch. If you are using ``stable``, please check, if possible, whether your
+  problem persists when you switch to ``master``. Even if ``master`` fixes it,
+  the issue may still be worth filing if the problem is serious enough to
+  warrant backporting a fix to ``stable``.
+* That your issue is about the software itself -- a bug report, feature
+  request, or a question about a well-defined operation within Blocks --
+  and not a general machine learning or neural networks question.
+
 Making a pull request
 =====================
 
@@ -49,6 +72,8 @@ mailing list and the GitHub issues to make sure the answer isn't out there
 already.
 
 .. _Blocks users mailing list: https://groups.google.com/forum/#!forum/blocks-users
+.. _Blocks installation instructions: https://blocks.readthedocs.org/en/latest/setup.html
+.. _Fuel: http://fuel.readthedocs.org/
 .. _quick reference: https://blocks.readthedocs.org/en/latest/development/pull_request.html
 .. _the documentation: https://blocks.readthedocs.org/en/latest/development/index.html#formatting-guidelines
 .. _coding guidelines: https://blocks.readthedocs.org/en/latest/development/index.html#code-guidelines
6 changes: 2 additions & 4 deletions blocks/__init__.py
@@ -1,5 +1,3 @@
"""The blocks library for parametrized Theano ops."""
# Scary warning: Adding code to this file can break namespace packages
# See https://pythonhosted.org/setuptools/setuptools.html#namespace-packages
__import__("pkg_resources").declare_namespace(__name__)
__version__ = '0.1.1'
import blocks.version
__version__ = blocks.version.version
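
The new version logic imports a `blocks/version.py` module that ships elsewhere in this release. A minimal sketch of what such a module presumably contains; the diff above only implies that it exposes a `version` attribute, and the value shown here is an assumption based on the release name:

```python
# blocks/version.py -- hypothetical sketch. Keeping the version string in
# a dedicated module gives setup.py and the package a single source of
# truth, without the pkg_resources namespace machinery removed above.
version = '0.2.0'
```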
43 changes: 24 additions & 19 deletions blocks/algorithms/__init__.py
@@ -14,7 +14,8 @@
 from blocks.graph import ComputationGraph
 from blocks.roles import add_role, ALGORITHM_HYPERPARAMETER, ALGORITHM_BUFFER
 from blocks.theano_expressions import l2_norm
-from blocks.utils import dict_subset, pack, shared_floatx
+from blocks.utils import (dict_subset, pack, shared_floatx,
+                          shared_floatx_zeros_matching)

logger = logging.getLogger(__name__)

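The hunks below replace the `shared_floatx(parameter.get_value() * 0., ...)` idiom with the newly imported `shared_floatx_zeros_matching` helper. A hedged sketch of what that helper plausibly does, assuming it builds on the existing `shared_floatx_zeros` utility and that the point of the change is to keep the new buffer's broadcastable pattern in sync with the parameter's, which the old idiom silently dropped:

```python
from blocks.utils import shared_floatx_zeros

def shared_floatx_zeros_matching(shared_variable, name=None, **kwargs):
    """Hypothetical sketch: a zero-filled shared variable with the same
    shape and broadcastable pattern as an existing shared variable."""
    return shared_floatx_zeros(shared_variable.get_value().shape,
                               name=name,
                               broadcastable=shared_variable.broadcastable,
                               **kwargs)
```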
@@ -420,7 +421,7 @@ def __init__(self, momentum=0.):
         add_role(self.momentum, ALGORITHM_HYPERPARAMETER)
 
     def compute_step(self, parameter, previous_step):
-        velocity = shared_floatx(parameter.get_value() * 0., "velocity")
+        velocity = shared_floatx_zeros_matching(parameter, "velocity")
         add_role(velocity, ALGORITHM_BUFFER)
         step = self.momentum * velocity + previous_step
         updates = [(velocity, step)]
@@ -487,11 +488,11 @@ def __init__(self, decay_rate=0.95, epsilon=1e-6):
         add_role(self.epsilon, ALGORITHM_HYPERPARAMETER)
 
     def compute_step(self, parameter, previous_step):
-        mean_square_step_tm1 = shared_floatx(parameter.get_value() * 0.,
-                                             "mean_square_step_tm1")
+        mean_square_step_tm1 = shared_floatx_zeros_matching(
+            parameter, "mean_square_step_tm1")
         add_role(mean_square_step_tm1, ALGORITHM_BUFFER)
-        mean_square_delta_x_tm1 = shared_floatx(parameter.get_value() * 0.,
-                                                "mean_square_delta_x_tm1")
+        mean_square_delta_x_tm1 = shared_floatx_zeros_matching(
+            parameter, "mean_square_delta_x_tm1")
         add_role(mean_square_delta_x_tm1, ALGORITHM_BUFFER)
 
         mean_square_step_t = (
@@ -550,8 +551,8 @@ def __init__(self, decay_rate=0.9, max_scaling=1e5):
         self.epsilon = 1. / max_scaling
 
     def compute_step(self, parameter, previous_step):
-        mean_square_step_tm1 = shared_floatx(parameter.get_value() * 0.,
-                                             "mean_square_step_tm1")
+        mean_square_step_tm1 = shared_floatx_zeros_matching(
+            parameter, "mean_square_step_tm1")
         add_role(mean_square_step_tm1, ALGORITHM_BUFFER)
         mean_square_step_t = (
             self.decay_rate * mean_square_step_tm1 +
@@ -742,15 +743,16 @@ class AdaGrad(StepRule):
"""
def __init__(self, learning_rate=0.002, epsilon=1e-6):
self.learning_rate = learning_rate
self.epsilon = epsilon
self.learning_rate = shared_floatx(learning_rate, "learning_rate")
self.epsilon = shared_floatx(epsilon, "epsilon")
add_role(self.learning_rate, ALGORITHM_HYPERPARAMETER)
add_role(self.epsilon, ALGORITHM_HYPERPARAMETER)

def compute_step(self, parameter, previous_step):
name = 'adagrad_sqs'
if parameter.name:
name += '_' + parameter.name
ssq = shared_floatx(parameter.get_value() * 0.,
name=name)
ssq = shared_floatx_zeros_matching(parameter, name=name)
add_role(ssq, ALGORITHM_BUFFER)

ssq_t = (tensor.sqr(previous_step) + ssq)
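
Throughout this file, scalar hyperparameters are promoted from plain Python floats to Theano shared variables. A short, self-contained illustration of why this matters: a compiled Theano function reads a shared variable's current value on every call, so a hyperparameter can be changed mid-training without recompiling. This uses plain Theano; the variable names are illustrative, not Blocks API:

```python
import numpy
import theano

floatX = theano.config.floatX
x = theano.shared(numpy.asarray(1.0, dtype=floatX), name='x')
learning_rate = theano.shared(numpy.asarray(0.1, dtype=floatX),
                              name='learning_rate')

# The compiled update closes over the shared variable itself, not over
# a constant frozen in at compilation time.
step = theano.function([], x, updates=[(x, x - learning_rate * x)])

step()                         # this call uses learning_rate = 0.1
learning_rate.set_value(0.01)  # anneal on the fly; no recompilation
step()                         # this call uses learning_rate = 0.01
```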
@@ -789,16 +791,19 @@ class Adam(StepRule):
     def __init__(self, learning_rate=0.002,
                  beta1=0.1, beta2=0.001, epsilon=1e-8,
                  decay_factor=(1 - 1e-8)):
-        self.learning_rate = learning_rate
-        self.beta1 = beta1
-        self.beta2 = beta2
-        self.epsilon = epsilon
-        self.decay_factor = decay_factor
+        self.learning_rate = shared_floatx(learning_rate, "learning_rate")
+        self.beta1 = shared_floatx(beta1, "beta1")
+        self.beta2 = shared_floatx(beta2, "beta2")
+        self.epsilon = shared_floatx(epsilon, "epsilon")
+        self.decay_factor = shared_floatx(decay_factor, "decay_factor")
+        for param in [self.learning_rate, self.beta1, self.beta2, self.epsilon,
+                      self.decay_factor]:
+            add_role(param, ALGORITHM_HYPERPARAMETER)
 
     def compute_step(self, parameter, previous_step):
-        mean = shared_floatx(parameter.get_value() * 0., 'mean')
+        mean = shared_floatx_zeros_matching(parameter, 'mean')
         add_role(mean, ALGORITHM_BUFFER)
-        variance = shared_floatx(parameter.get_value() * 0., 'variance')
+        variance = shared_floatx_zeros_matching(parameter, 'variance')
         add_role(variance, ALGORITHM_BUFFER)
         time = shared_floatx(0., 'time')
         add_role(time, ALGORITHM_BUFFER)
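
With Adam given the same treatment, every hyperparameter on a step-rule instance is now a named shared variable tagged with the ALGORITHM_HYPERPARAMETER role. A brief usage sketch implied by the constructor above; the attribute names and defaults come directly from the diff:

```python
from blocks.algorithms import Adam

adam = Adam()  # defaults from the diff: learning_rate=0.002, beta1=0.1, ...
print(adam.learning_rate.get_value())  # -> 0.002
adam.beta1.set_value(0.05)  # adjust in place; compiled updates see the new value
```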