# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 17 | +"""Batch normalization.""" |
| 18 | +import typing |
| 19 | + |
| 20 | +from tvm import te |
| 21 | +from tvm import topi |
| 22 | + |
| 23 | + |
| 24 | +def batch_norm( |
| 25 | + data: te.Tensor, |
| 26 | + gamma: te.Tensor, |
| 27 | + beta: te.Tensor, |
| 28 | + moving_mean: te.Tensor, |
| 29 | + moving_var: te.Tensor, |
| 30 | + axis: typing.Optional[int] = None, |
| 31 | + epsilon: typing.Optional[float] = None, |
| 32 | + center: typing.Optional[bool] = None, |
| 33 | + scale: typing.Optional[bool] = None, |
| 34 | +) -> typing.List[te.Tensor]: |
| 35 | + """Batch normalization layer (Ioffe and Szegedy, 2014). |
| 36 | +
|
| 37 | + Normalizes the input at each batch, i.e. applies a transformation |
| 38 | + that maintains the mean activation close to 0 and the activation |
| 39 | + standard deviation close to 1. |
| 40 | +
|
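    At inference time this computes, elementwise,
    ``(data - moving_mean) / sqrt(moving_var + epsilon) * gamma + beta``,
    with the per-axis statistics broadcast against ``data`` along ``axis``.
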
    Parameters
    ----------
    data : tvm.te.Tensor
        Input to be batch-normalized.

    gamma : tvm.te.Tensor
        Scale factor to be applied to the normalized tensor.

    beta : tvm.te.Tensor
        Offset to be applied to the normalized tensor.

    moving_mean : tvm.te.Tensor
        Running mean of input.

    moving_var : tvm.te.Tensor
        Running variance of input.

    axis : int, optional, default=1
        Specify along which shape axis the normalization should occur.

    epsilon : float, optional, default=1e-5
        Small float added to variance to avoid dividing by zero.

    center : bool, optional, default=True
        If True, add offset of beta to normalized tensor. If False,
        beta is ignored.

    scale : bool, optional, default=True
        If True, scale normalized tensor by gamma. If False, gamma
        is ignored.

    Returns
    -------
    output : list of tvm.te.Tensor
        Normalized data with the same shape as the input.

    moving_mean : tvm.te.Tensor
        Running mean of input.

    moving_var : tvm.te.Tensor
        Running variance of input.
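
    Examples
    --------
    A minimal sketch with illustrative shapes: normalize an NCHW tensor
    over the channel axis (the default ``axis=1``).

    >>> data = te.placeholder((4, 16, 32, 32), name="data")
    >>> gamma = te.placeholder((16,), name="gamma")
    >>> beta = te.placeholder((16,), name="beta")
    >>> moving_mean = te.placeholder((16,), name="moving_mean")
    >>> moving_var = te.placeholder((16,), name="moving_var")
    >>> out, mean, var = batch_norm(data, gamma, beta, moving_mean, moving_var)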
| 82 | + """ |
    if axis is None:
        axis = 1

    if epsilon is None:
        epsilon = 1e-5

    if center is None:
        center = True

    if scale is None:
        scale = True

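    # Build a broadcast-compatible shape, e.g. (1, C, 1, 1) for NCHW input,
    # so the per-axis statistics line up with `data` along `axis`.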
    shape = [1] * len(data.shape)
    shape[axis] = data.shape[axis]

    moving_mean_rs = topi.reshape(moving_mean, shape)
    moving_var_rs = topi.reshape(moving_var, shape)

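    # Normalize with the running statistics: (data - mean) / sqrt(var + eps).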
    out = (data - moving_mean_rs) / topi.math.sqrt(moving_var_rs + epsilon)

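    # Optionally apply the learned affine transform (gamma scale, beta shift).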
    if scale:
        out = out * topi.reshape(gamma, shape)
    if center:
        out = out + topi.reshape(beta, shape)

    # Moving mean and var aren't updated at inference time. Multiply by 1
    # so the outputs are fresh tensors rather than the input placeholders.
    return [out, moving_mean * 1, moving_var * 1]