Implement normalize and normalize!
Simple helper functions for normalizing vectors

Closes JuliaLang#12047
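
For orientation, a minimal usage sketch of the two new helpers (illustrative only, not part of the diff below; the p-norm argument defaults to 2):

v = [3.0, 4.0]
u = normalize(v)    # returns a new unit vector [0.6, 0.8]; v is left unchanged
normalize!(v)       # rescales v in-place so that norm(v) == 1.0
normalize(v, 1)     # the optional p argument selects the p-norm, here the 1-norm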
jiahao authored and bjarthur committed Oct 27, 2015
1 parent c6300f9 commit a5a41b9
Showing 4 changed files with 90 additions and 0 deletions.
2 changes: 2 additions & 0 deletions base/exports.jl
@@ -676,6 +676,8 @@ export
lufact,
lyap,
norm,
normalize,
normalize!,
nullspace,
ordschur!,
ordschur,
2 changes: 2 additions & 0 deletions base/linalg.jl
@@ -95,6 +95,8 @@ export
lufact!,
lyap,
norm,
normalize,
normalize!,
nullspace,
ordschur!,
ordschur,
65 changes: 65 additions & 0 deletions base/linalg/generic.jl
@@ -529,3 +529,68 @@ function isapprox{T<:Number,S<:Number}(x::AbstractArray{T}, y::AbstractArray{S};
    d = norm(x - y)
    return isfinite(d) ? d <= atol + rtol*max(norm(x), norm(y)) : x == y
end

"""
    normalize!(v, [p=2])

Normalize the vector `v` in-place with respect to the `p`-norm.

# Inputs

- `v::AbstractVector` - vector to be normalized
- `p::Real` - The `p`-norm to normalize with respect to. Default: 2

# Output

- `v` - A unit vector being the input vector, rescaled to have norm 1.
        The input vector is modified in-place.

# See also

`normalize`
"""
function normalize!(v::AbstractVector, p::Real=2)
    nrm = norm(v, p)

    #The largest positive floating point number whose inverse is less than
    #infinity
    const δ = inv(prevfloat(typemax(float(nrm))))

    if nrm ≥ δ #Safe to multiply with inverse
        invnrm = inv(nrm)
        scale!(v, invnrm)

    else #Divide by norm; slower but more correct
        #Note 2015-10-19: As of Julia 0.4, @simd does not vectorize floating
        #point division, although vectorized intrinsics like DIVPD exist. I
        #will leave the @simd annotation in, hoping that someone will improve
        #the @simd macro in the future. - cjh
        @inbounds @simd for i in eachindex(v)
            v[i] /= nrm
        end
    end

    v
end

"""
    normalize(v, [p=2])

Normalize the vector `v` with respect to the `p`-norm.

# Inputs

- `v::AbstractVector` - vector to be normalized
- `p::Real` - The `p`-norm to normalize with respect to. Default: 2

# Output

- `v` - A unit vector being a copy of the input vector, scaled to have norm 1

# See also

`normalize!`
"""
normalize(v::AbstractVector, p::Real=2) = v/norm(v, p)
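
The δ threshold in normalize! guards against overflow: once the norm drops below inv(prevfloat(typemax(...))), its reciprocal can overflow to Inf, so the function falls back to element-wise division instead of multiplying by the inverse. A small sketch of the failure mode being avoided (illustrative, assuming Float64):

δ = inv(prevfloat(typemax(Float64)))  # ≈ 5.56e-309, the smallest norm whose reciprocal is still finite
x = [δ/2, 0.0]                        # norm(x) is δ/2, just below the δ threshold
inv(norm(x))                          # Inf: scale!(x, inv(norm(x))) would fill x with Inf and NaN
x ./ norm(x)                          # [1.0, 0.0]: element-wise division stays finite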

21 changes: 21 additions & 0 deletions test/linalg/generic.jl
@@ -150,3 +150,24 @@ let
    @test LinAlg.axpy!(α, x, deepcopy(y)) == x .* Matrix{Int}[α]
    @test LinAlg.axpy!(α, x, deepcopy(y)) != Matrix{Int}[α] .* x
end

let
    v = [3.0, 4.0]
    @test norm(v) === 5.0
    w = normalize(v)
    @test w == [0.6, 0.8]
    @test norm(w) === 1.0
    @test normalize!(v) == w
end

#Test potential overflow in normalize!
let
    δ = inv(prevfloat(typemax(Float64)))
    v = [δ, -δ]

    @test norm(v) === 7.866824069956793e-309
    w = normalize(v)
    @test norm(w) === 1.0
    @test w ≈ [1/√2, -1/√2]
    @test normalize!(v) == w
end
