Base.LinAlg to new LinearAlgebra stdlib
fredrikekre committed Jan 17, 2018

1 parent 3d84bbb commit 8b73a98
Showing 126 changed files with 2,515 additions and 2,429 deletions.
3 changes: 3 additions & 0 deletions NEWS.md
@@ -898,6 +898,9 @@ Deprecated or removed

* Sparse array functionality has moved to the `SparseArrays` standard library module ([#25249]).

* Linear algebra functionality, and specifically the `LinAlg` module, has moved to the
  `LinearAlgebra` standard library module ([#25571]).

* `@printf` and `@sprintf` have been moved to the `Printf` standard library ([#23929],[#25056]).

* The aliases `Complex32`, `Complex64` and `Complex128` have been deprecated in favor of `ComplexF16`,
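
A minimal migration sketch for the `LinAlg` entry above; the matrix and calls below are illustrative, not taken from the diff:

```julia
# Previously: functionality was reachable as Base.LinAlg.norm, Base.LinAlg.dot, ...
# Now: load the standard library explicitly.
using LinearAlgebra

A = [2.0 1.0; 1.0 3.0]
norm(A)                        # exported as before, now owned by LinearAlgebra
dot([1.0, 2.0], [3.0, 4.0])    # 11.0
LinearAlgebra.checksquare(A)   # non-exported helpers live under the new module name
```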
7 changes: 6 additions & 1 deletion base/array.jl
@@ -391,7 +391,12 @@ end
function _one(unit::T, x::AbstractMatrix) where T
m,n = size(x)
m==n || throw(DimensionMismatch("multiplicative identity defined only for square matrices"))
Matrix{T}(I, m, m)
# Matrix{T}(I, m, m)
I = zeros(T, m, m)
for i in 1:m
I[i,i] = 1
end
I
end

one(x::AbstractMatrix{T}) where {T} = _one(one(T), x)
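
A small behavioral check of the rewritten `_one` above (illustrative only): the explicit loop builds the same identity matrix that `Matrix{T}(I, m, m)` produced before `I` left `Base`.

```julia
M = [1 2; 3 4]
one(M) == [1 0; 0 1]   # true: multiplicative identity with M's size and element type
one(M) * M == M        # true
```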
1,525 changes: 147 additions & 1,378 deletions base/deprecated.jl

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion base/essentials.jl
@@ -371,7 +371,7 @@ julia> sizeof(ComplexF64)
If `T` does not have a specific size, an error is thrown.
```jldoctest
julia> sizeof(Base.LinAlg.LU)
julia> sizeof(Base.SubArray)
ERROR: argument is an abstract type; size is indeterminate
Stacktrace:
[...]
98 changes: 1 addition & 97 deletions base/exports.jl
@@ -8,9 +8,6 @@ export
StackTraces,
Sys,
Libc,
LinAlg,
BLAS,
LAPACK,
Serializer,
Docs,
Markdown,
@@ -29,7 +26,6 @@ export
AbstractVecOrMat,
Array,
AbstractDict,
Bidiagonal,
BigFloat,
BigInt,
BitArray,
@@ -46,22 +42,16 @@ export
ComplexF64,
ComplexF32,
ComplexF16,
ConjVector,
ConjMatrix,
DenseMatrix,
DenseVecOrMat,
DenseVector,
DevNull,
Diagonal,
Dict,
Dims,
EachLine,
Enum,
Enumerate,
ExponentialBackOff,
Factorization,
Hermitian,
UniformScaling,
IndexCartesian,
IndexLinear,
IndexStyle,
@@ -70,7 +60,6 @@ export
IOBuffer,
IOStream,
LinSpace,
LowerTriangular,
Irrational,
Matrix,
MergeSort,
@@ -94,8 +83,6 @@ export
RoundNearestTiesUp,
RoundToZero,
RoundUp,
Adjoint,
Transpose,
AbstractSerializer,
SerializationState,
Set,
@@ -108,12 +95,8 @@ export
StridedVector,
SubArray,
SubString,
Symmetric,
SymTridiagonal,
Timer,
Tridiagonal,
UnitRange,
UpperTriangular,
Val,
VecOrMat,
Vector,
@@ -178,7 +161,6 @@ export
im,
π, pi,
ℯ,
I,

# Operators
!,
@@ -501,87 +483,9 @@ export
startswith,

# linear algebra
bkfact!,
bkfact,
chol,
cholfact!,
cholfact,
cond,
condskeel,
cross,
adjoint!,
adjoint,
det,
diag,
diagind,
diagm,
diff,
dot,
eig,
eigfact!,
eigfact,
eigmax,
eigmin,
eigvals,
eigvals!,
eigvecs,
factorize,
givens,
hessfact!,
hessfact,
isdiag,
ishermitian,
isposdef!,
isposdef,
issymmetric,
istril,
istriu,
kron,
ldltfact,
ldltfact!,
linreg,
logabsdet,
logdet,
lu,
lufact!,
lufact,
lyap,
norm,
normalize,
normalize!,
nullspace,
ordschur!,
ordschur,
peakflops,
pinv,
qr,
qrfact!,
qrfact,
lq,
lqfact!,
lqfact,
rank,
scale!,
schur,
schurfact!,
schurfact,
svd,
svdfact!,
svdfact,
svdvals!,
svdvals,
sylvester,
trace,
transpose!,
transpose,
tril!,
tril,
triu!,
triu,
vecdot,
vecnorm,
⋅,
×,
kron,

# bitarrays
falses,
14 changes: 7 additions & 7 deletions base/interactiveutil.jl
@@ -322,13 +322,13 @@ function versioninfo(io::IO=STDOUT; verbose::Bool=false, packages::Bool=false)
println(io)
end
println(io, " WORD_SIZE: ", Sys.WORD_SIZE)
if Base.libblas_name == "libopenblas" || BLAS.vendor() == :openblas || BLAS.vendor() == :openblas64
openblas_config = BLAS.openblas_get_config()
println(io, " BLAS: libopenblas (", openblas_config, ")")
else
println(io, " BLAS: ",libblas_name)
end
println(io, " LAPACK: ",liblapack_name)
# if Base.libblas_name == "libopenblas" || BLAS.vendor() == :openblas || BLAS.vendor() == :openblas64
# openblas_config = BLAS.openblas_get_config()
# println(io, " BLAS: libopenblas (", openblas_config, ")")
# else
# println(io, " BLAS: ",libblas_name)
# end
# println(io, " LAPACK: ",liblapack_name)
println(io, " LIBM: ",libm_name)
println(io, " LLVM: libLLVM-",libllvm_version," (", Sys.JIT, ", ", Sys.CPU_NAME, ")")

2 changes: 1 addition & 1 deletion base/math.jl
@@ -506,7 +506,7 @@ end
Compute the hypotenuse ``\\sqrt{\\sum x_i^2}`` avoiding overflow and underflow.
"""
hypot(x::Number...) = vecnorm(x)
hypot(x::Number...) = sqrt(sum(abs2(y) for y in x))

"""
atan2(y, x)
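
A quick numeric check of the `hypot` formula above (values chosen for illustration):

```julia
hypot(3.0, 4.0)         # 5.0
hypot(1.0, 2.0, 2.0)    # 3.0, via the vararg method rewritten above
```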
2 changes: 1 addition & 1 deletion base/precompile.jl
@@ -1234,7 +1234,7 @@ precompile(Tuple{typeof(Base.join), Base.GenericIOBuffer{Array{UInt8, 1}}, Tuple
precompile(Tuple{typeof(Base.setindex!), Base.Dict{Int64, Nothing}, Nothing, Int64})
precompile(Tuple{Type{Array{Union{Tuple{Any, Int64}, Tuple{Tuple{}, Any, Bool}}, 1}}, Tuple{Int64}})
precompile(Tuple{typeof(Base.eachindex), Array{Union{Tuple{Any, Int64}, Tuple{Tuple{}, Any, Bool}}, 1}})
precompile(Tuple{typeof(Base.LinAlg.BLAS.set_num_threads), Int64})
# precompile(Tuple{typeof(Base.LinearAlgebra.BLAS.set_num_threads), Int64})
precompile(Tuple{typeof(Base.eltype), Type{Base.Union{IO, Nothing}}})
precompile(Tuple{Type{Base.Union{IO, Nothing}}})
precompile(Tuple{typeof(Base.eltype), Type{Base.Union{AbstractString, Nothing}}})
8 changes: 4 additions & 4 deletions base/reflection.jl
@@ -9,8 +9,8 @@ Get the name of a `Module` as a `Symbol`.
# Examples
```jldoctest
julia> module_name(Base.LinAlg)
:LinAlg
julia> module_name(Base.Broadcast)
:Broadcast
```
"""
module_name(m::Module) = ccall(:jl_module_name, Ref{Symbol}, (Any,), m)
@@ -25,8 +25,8 @@ Get a module's enclosing `Module`. `Main` is its own parent.
julia> module_parent(Main)
Main
julia> module_parent(Base.LinAlg.BLAS)
Base.LinAlg
julia> module_parent(Base.Broadcast)
Base
```
"""
module_parent(m::Module) = ccall(:jl_module_parent, Ref{Module}, (Any,), m)
20 changes: 14 additions & 6 deletions base/statistics.jl
@@ -59,7 +59,8 @@ julia> mean!([1. 1.], v)
"""
function mean!(R::AbstractArray, A::AbstractArray)
sum!(R, A; init=true)
scale!(R, max(1, _length(R)) // _length(A))
x = max(1, _length(R)) // _length(A)
R .= R .* x
return R
end

@@ -175,7 +176,8 @@ function varm!(R::AbstractArray{S}, A::AbstractArray, m::AbstractArray; correcte
fill!(R, convert(S, NaN))
else
rn = div(_length(A), _length(R)) - Int(corrected)
scale!(centralize_sumabs2!(R, A, m), 1//rn)
centralize_sumabs2!(R, A, m)
R .= R .* (1 // rn)
end
return R
end
@@ -328,7 +330,7 @@ unscaled_covzm(x::AbstractVector{<:Number}) = sum(abs2, x)
unscaled_covzm(x::AbstractVector) = sum(t -> t*t', x)
unscaled_covzm(x::AbstractMatrix, vardim::Int) = (vardim == 1 ? _conj(x'x) : x * x')

unscaled_covzm(x::AbstractVector, y::AbstractVector) = dot(y, x)
unscaled_covzm(x::AbstractVector, y::AbstractVector) = sum(conj(y[i])*x[i] for i in eachindex(y, x))
unscaled_covzm(x::AbstractVector, y::AbstractMatrix, vardim::Int) =
(vardim == 1 ? *(transpose(x), _conj(y)) : *(transpose(x), transpose(_conj(y))))
unscaled_covzm(x::AbstractMatrix, y::AbstractVector, vardim::Int) =
@@ -342,14 +344,20 @@ covzm(x::AbstractVector; corrected::Bool=true) = unscaled_covzm(x) / (_length(x)
function covzm(x::AbstractMatrix, vardim::Int=1; corrected::Bool=true)
C = unscaled_covzm(x, vardim)
T = promote_type(typeof(first(C) / 1), eltype(C))
return scale!(convert(AbstractMatrix{T}, C), 1//(size(x, vardim) - corrected))
A = convert(AbstractMatrix{T}, C)
b = 1//(size(x, vardim) - corrected)
A .= A .* b
return A
end
covzm(x::AbstractVector, y::AbstractVector; corrected::Bool=true) =
unscaled_covzm(x, y) / (_length(x) - Int(corrected))
function covzm(x::AbstractVecOrMat, y::AbstractVecOrMat, vardim::Int=1; corrected::Bool=true)
C = unscaled_covzm(x, y, vardim)
T = promote_type(typeof(first(C) / 1), eltype(C))
return scale!(convert(AbstractArray{T}, C), 1//(_getnobs(x, y, vardim) - corrected))
A = convert(AbstractArray{T}, C)
b = 1//(_getnobs(x, y, vardim) - corrected)
A .= A .* b
return A
end

# covm (with provided mean)
@@ -467,7 +475,7 @@ end
corzm(x::AbstractVector{T}) where {T} = one(real(T))
function corzm(x::AbstractMatrix, vardim::Int=1)
c = unscaled_covzm(x, vardim)
return cov2cor!(c, sqrt!(diag(c)))
return cov2cor!(c, collect(sqrt(c[i,i]) for i in 1:min(size(c)...)))
end
corzm(x::AbstractVector, y::AbstractMatrix, vardim::Int=1) =
cov2cor!(unscaled_covzm(x, y, vardim), sqrt(sum(abs2, x)), sqrt!(sum(abs2, y, vardim)))
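
The statistics hunks above all apply the same substitution: the old `scale!(A, b)` (now owned by `LinearAlgebra`) becomes an in-place broadcast, which keeps `base/statistics.jl` free of a linear-algebra dependency. A sketch of the equivalence with made-up data:

```julia
R = [2.0 4.0; 6.0 8.0]
b = 1 // 2
R .= R .* b    # same effect as the removed scale!(R, b): R is now [1.0 2.0; 3.0 4.0]
```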
12 changes: 5 additions & 7 deletions base/sysimg.jl
@@ -460,12 +460,6 @@ import Base64

INCLUDE_STATE = 2

# dense linear algebra
include("linalg/linalg.jl")
using .LinAlg
const ⋅ = dot
const × = cross

include("asyncmap.jl")

include("multimedia.jl")
@@ -520,6 +514,7 @@ Base.require(:FileWatching)
Base.require(:Future)
Base.require(:IterativeEigensolvers)
Base.require(:Libdl)
Base.require(:LinearAlgebra)
Base.require(:Logging)
Base.require(:Mmap)
Base.require(:Printf)
@@ -551,10 +546,13 @@ Base.require(:Unicode)
", run `using SparseArrays` to load sparse array functionality")
@deprecate_binding(SparseVector, root_module(:SparseArrays).SparseVector, true,
", run `using SparseArrays` to load sparse array functionality")

# PR #25571
@deprecate_binding LinAlg root_module(:LinearAlgebra) true ", run `using LinearAlgebra` instead"
end

empty!(LOAD_PATH)

Base.isfile("userimg.jl") && Base.include(Main, "userimg.jl")

Base.include(Base, "precompile.jl")
# Base.include(Base, "precompile.jl")
6 changes: 3 additions & 3 deletions doc/REQUIRE
@@ -1,3 +1,3 @@
Compat 0.39.0 0.39.0+
DocStringExtensions 0.4.1 0.4.1+
Documenter 0.12.4 0.12.4+
Compat
DocStringExtensions
Documenter
6 changes: 4 additions & 2 deletions doc/make.jl
@@ -4,6 +4,10 @@ Pkg.init()
cp(joinpath(@__DIR__, "REQUIRE"), Pkg.dir("REQUIRE"); remove_destination = true)
Pkg.update()
Pkg.resolve()
Pkg.checkout("Documenter")
# Pkg.checkout("Documenter", "fe/deprecations") # can't push my branch to JuliaDocs...
Pkg.checkout("Compat")
Pkg.checkout("Compat", "fe/linalg") # to make Compat load

using Documenter

@@ -68,7 +72,6 @@ const PAGES = [
"manual/documentation.md",
"manual/metaprogramming.md",
"manual/arrays.md",
"manual/linear-algebra.md",
"manual/missing.md",
"manual/networking-and-streams.md",
"manual/parallel-computing.md",
@@ -98,7 +101,6 @@ const PAGES = [
"base/arrays.md",
"base/parallel.md",
"base/multi-threading.md",
"base/linalg.md",
"base/constants.md",
"base/file.md",
"base/io-network.md",
3 changes: 1 addition & 2 deletions doc/src/base/arrays.md
@@ -50,7 +50,6 @@ Base.IndexStyle
Base.conj!
Base.stride
Base.strides
Base.LinAlg.checksquare
```

## Broadcast and vectorization
@@ -148,7 +147,7 @@ Base.cumprod
Base.cumprod!
Base.cumsum
Base.cumsum!
Base.LinAlg.diff
LinearAlgebra.diff
Base.repeat(::AbstractArray)
Base.rot180
Base.rotl90
3 changes: 0 additions & 3 deletions doc/src/base/base.md
@@ -86,13 +86,10 @@ primitive type

## Base Modules
```@docs
Base.BLAS
Base.Docs
Base.Iterators
Base.LAPACK
Base.LibGit2
Base.Libc
Base.LinAlg
Base.Markdown
Base.Meta
Base.Pkg
1 change: 0 additions & 1 deletion doc/src/base/index.md
@@ -10,7 +10,6 @@
* [Distributed Computing](@ref)
* [Shared Arrays](@ref)
* [Multi-Threading](@ref)
* [Linear Algebra](@ref)
* [Constants](@ref lib-constants)
* [Filesystem](@ref)
* [Delimited Files](@ref)
340 changes: 0 additions & 340 deletions doc/src/base/linalg.md

This file was deleted.

3 changes: 1 addition & 2 deletions doc/src/index.md
@@ -30,7 +30,6 @@ Please read the [release notes](NEWS.md) to see what has changed since the last
* [Documentation](@ref)
* [Metaprogramming](@ref)
* [Multi-dimensional Arrays](@ref man-multi-dim-arrays)
* [Linear Algebra](@ref)
* [Missing Values](@ref missing)
* [Networking and Streams](@ref)
* [Parallel Computing](@ref)
@@ -63,7 +62,6 @@ Please read the [release notes](NEWS.md) to see what has changed since the last
* [Distributed Computing](@ref)
* [Multi-Threading](@ref)
* [Shared Arrays](@ref)
* [Linear Algebra](@ref)
* [Constants](@ref lib-constants)
* [Filesystem](@ref)
* [I/O and Network](@ref)
@@ -91,6 +89,7 @@ Please read the [release notes](NEWS.md) to see what has changed since the last
* [Profiling](@ref lib-profiling)
* [Random Numbers](@ref)
* [Shared Arrays](@ref)
* [Linear Algebra](@ref)
* [Sparse Arrays](@ref)
* [Unicode](@ref)
* [Unit Testing](@ref)
1 change: 0 additions & 1 deletion doc/src/manual/index.md
@@ -19,7 +19,6 @@
* [Documentation](@ref)
* [Metaprogramming](@ref)
* [Multi-dimensional Arrays](@ref man-multi-dim-arrays)
* [Linear algebra](@ref)
* [Missing Values](@ref missing)
* [Networking and Streams](@ref)
* [Parallel Computing](@ref)
3 changes: 2 additions & 1 deletion stdlib/Distributed/src/cluster.jl
@@ -163,8 +163,9 @@ mutable struct LocalProcess
end


import LinearAlgebra
function disable_threaded_libs()
BLAS.set_num_threads(1)
LinearAlgebra.BLAS.set_num_threads(1)
end

worker_timeout() = parse(Float64, get(ENV, "JULIA_WORKER_TIMEOUT", "60.0"))
4 changes: 2 additions & 2 deletions stdlib/Distributed/test/distributed_exec.jl
@@ -18,7 +18,7 @@ include(joinpath(Sys.BINDIR, "..", "share", "julia", "test", "testenv.jl"))
addprocs_with_testenv(4)
@test nprocs() == 5

@everywhere using Test, Random
@everywhere using Test, Random, LinearAlgebra

id_me = myid()
id_other = filter(x -> x != id_me, procs())[rand(1:(nprocs()-1))]
@@ -916,7 +916,7 @@ end
# Test addprocs enable_threaded_blas parameter

const get_num_threads = function() # anonymous so it will be serialized when called
blas = BLAS.vendor()
blas = LinearAlgebra.BLAS.vendor()
# Wrap in a try to catch unsupported blas versions
try
if blas == :openblas
13 changes: 8 additions & 5 deletions stdlib/IterativeEigensolvers/src/IterativeEigensolvers.jl
@@ -7,7 +7,10 @@ Arnoldi and Lanczos iteration for computing eigenvalues
"""
module IterativeEigensolvers

using Base.LinAlg: BlasFloat, BlasInt, SVD, checksquare, mul!
using LinearAlgebra: BlasFloat, BlasInt, SVD, checksquare, mul!,
UniformScaling, issymmetric, ishermitian,
factorize, I, scale!, qr
import LinearAlgebra

export eigs, svds

@@ -205,14 +208,14 @@ function SVDAugmented(A::AbstractMatrix{T}) where T
SVDAugmented{Tnew,typeof(Anew)}(Anew)
end

function Base.LinAlg.mul!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where T
function LinearAlgebra.mul!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where T
m, mn = size(A.X, 1), length(x)
mul!( view(y, 1:m), A.X, view(x, m + 1:mn)) # left singular vector
mul!(view(y, m + 1:mn), adjoint(A.X), view(x, 1:m)) # right singular vector
return y
end
Base.size(A::SVDAugmented) = ((+)(size(A.X)...), (+)(size(A.X)...))
Base.ishermitian(A::SVDAugmented) = true
LinearAlgebra.ishermitian(A::SVDAugmented) = true

struct AtA_or_AAt{T,S} <: AbstractArray{T, 2}
A::S
@@ -225,7 +228,7 @@ function AtA_or_AAt(A::AbstractMatrix{T}) where T
AtA_or_AAt{Tnew,typeof(Anew)}(Anew, Vector{Tnew}(uninitialized, max(size(A)...)))
end

function Base.LinAlg.mul!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where T
function LinearAlgebra.mul!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where T
if size(A.A, 1) >= size(A.A, 2)
mul!(A.buffer, A.A, x)
return mul!(y, adjoint(A.A), A.buffer)
@@ -235,7 +238,7 @@ function Base.LinAlg.mul!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVecto
end
end
Base.size(A::AtA_or_AAt) = ntuple(i -> min(size(A.A)...), Val(2))
Base.ishermitian(s::AtA_or_AAt) = true
LinearAlgebra.ishermitian(s::AtA_or_AAt) = true


svds(A::AbstractMatrix{<:BlasFloat}; kwargs...) = _svds(A; kwargs...)
2 changes: 1 addition & 1 deletion stdlib/IterativeEigensolvers/src/arpack.jl
@@ -2,7 +2,7 @@

module ARPACK

import ..LinAlg: BlasInt, ARPACKException
import LinearAlgebra: BlasInt, ARPACKException

## aupd and eupd wrappers

6 changes: 2 additions & 4 deletions stdlib/IterativeEigensolvers/src/deprecated.jl
@@ -1,6 +1,4 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

@eval IterativeEigensolvers begin
Base.A_mul_B!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where {T} = Base.LinAlg.mul!(y, A, x)
Base.A_mul_B!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where {T} = Base.LinAlg.mul!(y, A, x)
end
LinearAlgebra.A_mul_B!(y::StridedVector{T}, A::AtA_or_AAt{T}, x::StridedVector{T}) where {T} = LinearAlgebra.mul!(y, A, x)
LinearAlgebra.A_mul_B!(y::StridedVector{T}, A::SVDAugmented{T}, x::StridedVector{T}) where {T} = LinearAlgebra.mul!(y, A, x)
18 changes: 9 additions & 9 deletions stdlib/IterativeEigensolvers/test/runtests.jl
@@ -1,7 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using IterativeEigensolvers
using Test, SparseArrays, Random
using Test, LinearAlgebra, SparseArrays, Random

@testset "eigs" begin
srand(1234)
@@ -33,7 +33,7 @@ using Test, SparseArrays, Random
(d,v) = eigs(a, nev=3)
@test a*v[:,2] ≈ d[2]*v[:,2]
@test norm(v) > testtol # eigenvectors cannot be null vectors
(d,v) = eigs(a, I, nev=3) # test eigs(A, B; kwargs...)
(d,v) = eigs(a, LinearAlgebra.I, nev=3) # test eigs(A, B; kwargs...)
@test a*v[:,2] ≈ d[2]*v[:,2]
@test norm(v) > testtol # eigenvectors cannot be null vectors
@test_logs (:warn,"Use symbols instead of strings for specifying which eigenvalues to compute") eigs(a, which="LM")
@@ -42,7 +42,7 @@ using Test, SparseArrays, Random
# (d,v) = eigs(a, b, nev=3, tol=1e-8) # not handled yet
# @test a*v[:,2] ≈ d[2]*b*v[:,2] atol=testtol
# @test norm(v) > testtol # eigenvectors cannot be null vectors
if elty <: Base.LinAlg.BlasComplex
if elty <: LinearAlgebra.BlasComplex
sr_ind = indmin(real.(a_evs))
(d, v) = eigs(a, nev=1, which=:SR)
@test d[1] ≈ a_evs[sr_ind]
@@ -130,15 +130,15 @@ let A6965 = [
end

# Example from Quantum Information Theory
import Base: size, issymmetric, ishermitian
import Base: size

mutable struct CPM{T<:Base.LinAlg.BlasFloat} <: AbstractMatrix{T} # completely positive map
mutable struct CPM{T<:LinearAlgebra.BlasFloat} <: AbstractMatrix{T} # completely positive map
kraus::Array{T,3} # kraus operator representation
end
size(Phi::CPM) = (size(Phi.kraus,1)^2,size(Phi.kraus,3)^2)
issymmetric(Phi::CPM) = false
ishermitian(Phi::CPM) = false
function Base.LinAlg.mul!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:Base.LinAlg.BlasFloat}
LinearAlgebra.issymmetric(Phi::CPM) = false
LinearAlgebra.ishermitian(Phi::CPM) = false
function LinearAlgebra.mul!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:LinearAlgebra.BlasFloat}
rho = reshape(rho,(size(Phi.kraus,3),size(Phi.kraus,3)))
rho1 = zeros(T,(size(Phi.kraus,1),size(Phi.kraus,1)))
for s = 1:size(Phi.kraus,2)
@@ -147,7 +147,7 @@ function Base.LinAlg.mul!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{
end
return copyto!(rho2,rho1)
end
Base.LinAlg.A_mul_B!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:Base.LinAlg.BlasFloat} = Base.LinAlg.mul!(rho2, Phi, rho)
LinearAlgebra.A_mul_B!(rho2::StridedVector{T},Phi::CPM{T},rho::StridedVector{T}) where {T<:LinearAlgebra.BlasFloat} = LinearAlgebra.mul!(rho2, Phi, rho)
# after the A_mul_B! deprecation, remove this A_mul_B! def

let
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Linear algebra
# Linear Algebra

In addition to (and as part of) its support for multi-dimensional arrays, Julia provides native implementations
of many common and useful linear algebra operations. Basic operations, such as [`trace`](@ref), [`det`](@ref),
@@ -56,7 +56,7 @@ julia> A = [1.5 2 -4; 3 -1 -6; -10 2.3 4]
-10.0 2.3 4.0
julia> factorize(A)
Base.LinAlg.LU{Float64,Array{Float64,2}} with factors L and U:
LinearAlgebra.LU{Float64,Array{Float64,2}} with factors L and U:
[1.0 0.0 0.0; -0.15 1.0 0.0; -0.3 -0.132196 1.0]
[-10.0 2.3 4.0; 0.0 2.345 -3.4; 0.0 0.0 -5.24947]
```
@@ -72,14 +72,14 @@ julia> B = [1.5 2 -4; 2 -1 -3; -4 -3 5]
-4.0 -3.0 5.0
julia> factorize(B)
Base.LinAlg.BunchKaufman{Float64,Array{Float64,2}}
LinearAlgebra.BunchKaufman{Float64,Array{Float64,2}}
D factor:
3×3 Tridiagonal{Float64,Array{Float64,1}}:
-1.64286 0.0 ⋅
0.0 -2.8 0.0
⋅ 0.0 5.0
U factor:
3×3 Base.LinAlg.UnitUpperTriangular{Float64,Array{Float64,2}}:
3×3 LinearAlgebra.UnitUpperTriangular{Float64,Array{Float64,2}}:
1.0 0.142857 -0.8
0.0 1.0 -0.6
0.0 0.0 1.0
@@ -260,8 +260,8 @@ compute the factorization of a matrix into a product of matrices, and are one of
in linear algebra.

The following table summarizes the types of matrix factorizations that have been implemented in
Julia. Details of their associated methods can be found in the [Linear Algebra](@ref) section
of the Julia Base documentation.
Julia. Details of their associated methods can be found in the [Standard Functions](@ref) section
of the Linear Algebra documentation.

| Type | Description |
|:----------------- |:-------------------------------------------------------------------------------------------------------------- |
@@ -276,3 +276,348 @@ of the Julia Base documentation.
| `Eigen` | [Spectral decomposition](https://en.wikipedia.org/wiki/Eigendecomposition_(matrix)) |
| `SVD` | [Singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition) |
| `GeneralizedSVD` | [Generalized SVD](https://en.wikipedia.org/wiki/Generalized_singular_value_decomposition#Higher_order_version) |
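
For illustration, each row of the table corresponds to a `*fact` function documented under [Standard Functions](@ref); a sketch, mirroring the `issuccess` docstring later in this diff:

```julia
using LinearAlgebra

F = cholfact([1 0; 0 1])       # Cholesky factorization object
LinearAlgebra.issuccess(F)     # true

F = lufact([1 0; 0 0])         # LU of a singular matrix
LinearAlgebra.issuccess(F)     # false
```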




## Standard Functions

Linear algebra functions in Julia are largely implemented by calling functions from [LAPACK](http://www.netlib.org/lapack/).
Sparse factorizations call functions from [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html).

```@docs
Base.:*(::AbstractMatrix, ::AbstractMatrix)
Base.:\(::AbstractMatrix, ::AbstractVecOrMat)
LinearAlgebra.dot
LinearAlgebra.vecdot
LinearAlgebra.cross
LinearAlgebra.factorize
LinearAlgebra.Diagonal
LinearAlgebra.Bidiagonal
LinearAlgebra.SymTridiagonal
LinearAlgebra.Tridiagonal
LinearAlgebra.Symmetric
LinearAlgebra.Hermitian
LinearAlgebra.LowerTriangular
LinearAlgebra.UpperTriangular
LinearAlgebra.UniformScaling
LinearAlgebra.lu
LinearAlgebra.lufact
LinearAlgebra.lufact!
LinearAlgebra.chol
LinearAlgebra.cholfact
LinearAlgebra.cholfact!
LinearAlgebra.lowrankupdate
LinearAlgebra.lowrankdowndate
LinearAlgebra.lowrankupdate!
LinearAlgebra.lowrankdowndate!
LinearAlgebra.ldltfact
LinearAlgebra.ldltfact!
LinearAlgebra.qr
LinearAlgebra.qr!
LinearAlgebra.qrfact
LinearAlgebra.qrfact!
LinearAlgebra.QR
LinearAlgebra.QRCompactWY
LinearAlgebra.QRPivoted
LinearAlgebra.lqfact!
LinearAlgebra.lqfact
LinearAlgebra.lq
LinearAlgebra.bkfact
LinearAlgebra.bkfact!
LinearAlgebra.eig
LinearAlgebra.eigvals
LinearAlgebra.eigvals!
LinearAlgebra.eigmax
LinearAlgebra.eigmin
LinearAlgebra.eigvecs
LinearAlgebra.eigfact
LinearAlgebra.eigfact!
LinearAlgebra.hessfact
LinearAlgebra.hessfact!
LinearAlgebra.schurfact
LinearAlgebra.schurfact!
LinearAlgebra.schur
LinearAlgebra.ordschur
LinearAlgebra.ordschur!
LinearAlgebra.svdfact
LinearAlgebra.svdfact!
LinearAlgebra.svd
LinearAlgebra.svdvals
LinearAlgebra.svdvals!
LinearAlgebra.Givens
LinearAlgebra.givens
LinearAlgebra.triu
LinearAlgebra.triu!
LinearAlgebra.tril
LinearAlgebra.tril!
LinearAlgebra.diagind
LinearAlgebra.diag
LinearAlgebra.diagm
LinearAlgebra.scale!
LinearAlgebra.rank
LinearAlgebra.norm
LinearAlgebra.vecnorm
LinearAlgebra.normalize!
LinearAlgebra.normalize
LinearAlgebra.cond
LinearAlgebra.condskeel
LinearAlgebra.trace
LinearAlgebra.det
LinearAlgebra.logdet
LinearAlgebra.logabsdet
Base.inv(::AbstractMatrix)
LinearAlgebra.pinv
LinearAlgebra.nullspace
Base.repmat
Base.kron
LinearAlgebra.linreg
LinearAlgebra.exp(::StridedMatrix{<:LinearAlgebra.BlasFloat})
LinearAlgebra.log(::StridedMatrix)
LinearAlgebra.sqrt(::StridedMatrix{<:Real})
LinearAlgebra.cos(::StridedMatrix{<:Real})
LinearAlgebra.sin(::StridedMatrix{<:Real})
LinearAlgebra.sincos(::StridedMatrix{<:Real})
LinearAlgebra.tan(::StridedMatrix{<:Real})
LinearAlgebra.sec(::StridedMatrix)
LinearAlgebra.csc(::StridedMatrix)
LinearAlgebra.cot(::StridedMatrix)
LinearAlgebra.cosh(::StridedMatrix)
LinearAlgebra.sinh(::StridedMatrix)
LinearAlgebra.tanh(::StridedMatrix)
LinearAlgebra.sech(::StridedMatrix)
LinearAlgebra.csch(::StridedMatrix)
LinearAlgebra.coth(::StridedMatrix)
LinearAlgebra.acos(::StridedMatrix)
LinearAlgebra.asin(::StridedMatrix)
LinearAlgebra.atan(::StridedMatrix)
LinearAlgebra.asec(::StridedMatrix)
LinearAlgebra.acsc(::StridedMatrix)
LinearAlgebra.acot(::StridedMatrix)
LinearAlgebra.acosh(::StridedMatrix)
LinearAlgebra.asinh(::StridedMatrix)
LinearAlgebra.atanh(::StridedMatrix)
LinearAlgebra.asech(::StridedMatrix)
LinearAlgebra.acsch(::StridedMatrix)
LinearAlgebra.acoth(::StridedMatrix)
LinearAlgebra.lyap
LinearAlgebra.sylvester
LinearAlgebra.issuccess
LinearAlgebra.issymmetric
LinearAlgebra.isposdef
LinearAlgebra.isposdef!
LinearAlgebra.istril
LinearAlgebra.istriu
LinearAlgebra.isdiag
LinearAlgebra.ishermitian
LinearAlgebra.RowVector
LinearAlgebra.ConjArray
Base.transpose
LinearAlgebra.transpose!
Base.adjoint
LinearAlgebra.adjoint!
LinearAlgebra.peakflops
LinearAlgebra.stride1
LinearAlgebra.checksquare
```

## Low-level matrix operations

In many cases there are in-place versions of matrix operations that allow you to supply
a pre-allocated output vector or matrix. This is useful when optimizing critical code in order
to avoid the overhead of repeated allocations. These in-place operations are suffixed with `!`
below (e.g. `mul!`) according to the usual Julia convention.

```@docs
LinearAlgebra.mul!
LinearAlgebra.ldiv!
LinearAlgebra.rdiv!
```
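
A sketch of the convention, using made-up matrices of matching size:

```julia
using LinearAlgebra

A = rand(3, 3); B = rand(3, 3)
C = similar(A)                 # pre-allocated output
LinearAlgebra.mul!(C, A, B)    # writes A*B into C and returns C; no allocation for the result
C ≈ A * B                      # true
```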

## BLAS Functions

In Julia (as in much of scientific computation), dense linear-algebra operations are based on
the [LAPACK library](http://www.netlib.org/lapack/), which in turn is built on top of basic linear-algebra
building-blocks known as the [BLAS](http://www.netlib.org/blas/). There are highly optimized
implementations of BLAS available for every computer architecture, and sometimes in high-performance
linear algebra routines it is useful to call the BLAS functions directly.

`LinearAlgebra.BLAS` provides wrappers for some of the BLAS functions. Those BLAS functions
that overwrite one of the input arrays have names ending in `'!'`. Usually, a BLAS function has
four methods defined, for [`Float64`](@ref), [`Float32`](@ref), `ComplexF64`, and `ComplexF32` arrays.
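
For example, a hedged sketch of the mutating `!` convention with `axpy!` (`y ← a*x + y`), documented below:

```julia
using LinearAlgebra

x = [1.0, 2.0, 3.0]
y = [10.0, 10.0, 10.0]
LinearAlgebra.axpy!(2.0, x, y)   # overwrites y with 2.0*x + y
y == [12.0, 14.0, 16.0]          # true
```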

### [BLAS Character Arguments](@id stdlib-blas-chars)
Many BLAS functions accept arguments that determine whether to transpose an argument (`trans`),
which triangle of a matrix to reference (`uplo` or `ul`),
whether the diagonal of a triangular matrix can be assumed to
be all ones (`dA`) or which side of a matrix multiplication
the input argument belongs on (`side`). The possibilities are:

#### [Multiplication Order](@id stdlib-blas-side)
| `side` | Meaning |
|:-------|:--------------------------------------------------------------------|
| `'L'` | The argument goes on the *left* side of a matrix-matrix operation. |
| `'R'` | The argument goes on the *right* side of a matrix-matrix operation. |

#### [Triangle Referencing](@id stdlib-blas-uplo)
| `uplo`/`ul` | Meaning |
|:------------|:------------------------------------------------------|
| `'U'` | Only the *upper* triangle of the matrix will be used. |
| `'L'` | Only the *lower* triangle of the matrix will be used. |

#### [Transposition Operation](@id stdlib-blas-trans)
| `trans`/`tX` | Meaning |
|:-------------|:--------------------------------------------------------|
| `'N'` | The input matrix `X` is not transposed or conjugated. |
| `'T'` | The input matrix `X` will be transposed. |
| `'C'` | The input matrix `X` will be conjugated and transposed. |

#### [Unit Diagonal](@id stdlib-blas-diag)
| `diag`/`dX` | Meaning |
|:------------|:----------------------------------------------------------|
| `'N'` | The diagonal values of the matrix `X` will be read. |
| `'U'` | The diagonal of the matrix `X` is assumed to be all ones. |
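
Combining the character arguments, a sketch with `gemm` (documented below); the matrices are illustrative:

```julia
using LinearAlgebra: BLAS

A = rand(2, 3)
B = rand(4, 3)
# 'N': use A as is; 'T': use transpose(B); computes 1.0 * A * transpose(B)
C = BLAS.gemm('N', 'T', 1.0, A, B)
size(C) == (2, 4)        # true
C ≈ A * transpose(B)     # true
```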

```@docs
LinearAlgebra.BLAS
LinearAlgebra.BLAS.dotu
LinearAlgebra.BLAS.dotc
LinearAlgebra.BLAS.blascopy!
LinearAlgebra.BLAS.nrm2
LinearAlgebra.BLAS.asum
LinearAlgebra.axpy!
LinearAlgebra.BLAS.scal!
LinearAlgebra.BLAS.scal
LinearAlgebra.BLAS.ger!
LinearAlgebra.BLAS.syr!
LinearAlgebra.BLAS.syrk!
LinearAlgebra.BLAS.syrk
LinearAlgebra.BLAS.her!
LinearAlgebra.BLAS.herk!
LinearAlgebra.BLAS.herk
LinearAlgebra.BLAS.gbmv!
LinearAlgebra.BLAS.gbmv
LinearAlgebra.BLAS.sbmv!
LinearAlgebra.BLAS.sbmv(::Any, ::Any, ::Any, ::Any, ::Any)
LinearAlgebra.BLAS.sbmv(::Any, ::Any, ::Any, ::Any)
LinearAlgebra.BLAS.gemm!
LinearAlgebra.BLAS.gemm(::Any, ::Any, ::Any, ::Any, ::Any)
LinearAlgebra.BLAS.gemm(::Any, ::Any, ::Any, ::Any)
LinearAlgebra.BLAS.gemv!
LinearAlgebra.BLAS.gemv(::Any, ::Any, ::Any, ::Any)
LinearAlgebra.BLAS.gemv(::Any, ::Any, ::Any)
LinearAlgebra.BLAS.symm!
LinearAlgebra.BLAS.symm(::Any, ::Any, ::Any, ::Any, ::Any)
LinearAlgebra.BLAS.symm(::Any, ::Any, ::Any, ::Any)
LinearAlgebra.BLAS.symv!
LinearAlgebra.BLAS.symv(::Any, ::Any, ::Any, ::Any)
LinearAlgebra.BLAS.symv(::Any, ::Any, ::Any)
LinearAlgebra.BLAS.trmm!
LinearAlgebra.BLAS.trmm
LinearAlgebra.BLAS.trsm!
LinearAlgebra.BLAS.trsm
LinearAlgebra.BLAS.trmv!
LinearAlgebra.BLAS.trmv
LinearAlgebra.BLAS.trsv!
LinearAlgebra.BLAS.trsv
LinearAlgebra.BLAS.set_num_threads
LinearAlgebra.I
```

## LAPACK Functions

`LinearAlgebra.LAPACK` provides wrappers for some of the LAPACK functions for linear algebra.
Those functions that overwrite one of the input arrays have names ending in `'!'`.

Usually a function has 4 methods defined, one each for [`Float64`](@ref), [`Float32`](@ref),
`ComplexF64` and `ComplexF32` arrays.

Note that the LAPACK API provided by Julia can and will change in the future. Since this API is
not user-facing, there is no commitment to support/deprecate this specific set of functions in
future releases.
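
A minimal sketch of the calling convention with `getrf!` (documented below); as with BLAS, the trailing `!` means the input matrix is overwritten, here with the packed LU factors:

```julia
using LinearAlgebra

A = [4.0 3.0; 6.0 3.0]
A, ipiv, info = LinearAlgebra.LAPACK.getrf!(A)   # A now holds the unit-lower L and U packed together
info == 0                                        # true: the factorization succeeded
```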

```@docs
LinearAlgebra.LAPACK
LinearAlgebra.LAPACK.gbtrf!
LinearAlgebra.LAPACK.gbtrs!
LinearAlgebra.LAPACK.gebal!
LinearAlgebra.LAPACK.gebak!
LinearAlgebra.LAPACK.gebrd!
LinearAlgebra.LAPACK.gelqf!
LinearAlgebra.LAPACK.geqlf!
LinearAlgebra.LAPACK.geqrf!
LinearAlgebra.LAPACK.geqp3!
LinearAlgebra.LAPACK.gerqf!
LinearAlgebra.LAPACK.geqrt!
LinearAlgebra.LAPACK.geqrt3!
LinearAlgebra.LAPACK.getrf!
LinearAlgebra.LAPACK.tzrzf!
LinearAlgebra.LAPACK.ormrz!
LinearAlgebra.LAPACK.gels!
LinearAlgebra.LAPACK.gesv!
LinearAlgebra.LAPACK.getrs!
LinearAlgebra.LAPACK.getri!
LinearAlgebra.LAPACK.gesvx!
LinearAlgebra.LAPACK.gelsd!
LinearAlgebra.LAPACK.gelsy!
LinearAlgebra.LAPACK.gglse!
LinearAlgebra.LAPACK.geev!
LinearAlgebra.LAPACK.gesdd!
LinearAlgebra.LAPACK.gesvd!
LinearAlgebra.LAPACK.ggsvd!
LinearAlgebra.LAPACK.ggsvd3!
LinearAlgebra.LAPACK.geevx!
LinearAlgebra.LAPACK.ggev!
LinearAlgebra.LAPACK.gtsv!
LinearAlgebra.LAPACK.gttrf!
LinearAlgebra.LAPACK.gttrs!
LinearAlgebra.LAPACK.orglq!
LinearAlgebra.LAPACK.orgqr!
LinearAlgebra.LAPACK.orgql!
LinearAlgebra.LAPACK.orgrq!
LinearAlgebra.LAPACK.ormlq!
LinearAlgebra.LAPACK.ormqr!
LinearAlgebra.LAPACK.ormql!
LinearAlgebra.LAPACK.ormrq!
LinearAlgebra.LAPACK.gemqrt!
LinearAlgebra.LAPACK.posv!
LinearAlgebra.LAPACK.potrf!
LinearAlgebra.LAPACK.potri!
LinearAlgebra.LAPACK.potrs!
LinearAlgebra.LAPACK.pstrf!
LinearAlgebra.LAPACK.ptsv!
LinearAlgebra.LAPACK.pttrf!
LinearAlgebra.LAPACK.pttrs!
LinearAlgebra.LAPACK.trtri!
LinearAlgebra.LAPACK.trtrs!
LinearAlgebra.LAPACK.trcon!
LinearAlgebra.LAPACK.trevc!
LinearAlgebra.LAPACK.trrfs!
LinearAlgebra.LAPACK.stev!
LinearAlgebra.LAPACK.stebz!
LinearAlgebra.LAPACK.stegr!
LinearAlgebra.LAPACK.stein!
LinearAlgebra.LAPACK.syconv!
LinearAlgebra.LAPACK.sysv!
LinearAlgebra.LAPACK.sytrf!
LinearAlgebra.LAPACK.sytri!
LinearAlgebra.LAPACK.sytrs!
LinearAlgebra.LAPACK.hesv!
LinearAlgebra.LAPACK.hetrf!
LinearAlgebra.LAPACK.hetri!
LinearAlgebra.LAPACK.hetrs!
LinearAlgebra.LAPACK.syev!
LinearAlgebra.LAPACK.syevr!
LinearAlgebra.LAPACK.sygvd!
LinearAlgebra.LAPACK.bdsqr!
LinearAlgebra.LAPACK.bdsdc!
LinearAlgebra.LAPACK.gecon!
LinearAlgebra.LAPACK.gehrd!
LinearAlgebra.LAPACK.orghr!
LinearAlgebra.LAPACK.gees!
LinearAlgebra.LAPACK.gges!
LinearAlgebra.LAPACK.trexc!
LinearAlgebra.LAPACK.trsen!
LinearAlgebra.LAPACK.tgsen!
LinearAlgebra.LAPACK.trsyl!
```
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

__precompile__(true)

"""
Linear algebra module. Provides array arithmetic,
matrix factorizations and other linear algebra related
functionality.
"""
module LinAlg
module LinearAlgebra

import Base: \, /, *, ^, +, -, ==
import Base: USE_BLAS64, abs, acos, acosh, acot, acoth, acsc, acsch, adjoint, asec, asech,
@@ -176,15 +178,15 @@ julia> A = [1,2,3,4]
3
4
julia> Base.LinAlg.stride1(A)
julia> LinearAlgebra.stride1(A)
1
julia> B = view(A, 2:2:4)
2-element view(::Array{Int64,1}, 2:2:4) with eltype Int64:
2
4
julia> Base.LinAlg.stride1(B)
julia> LinearAlgebra.stride1(B)
2
```
"""
@@ -197,7 +199,7 @@ stride1(x::DenseArray) = stride(x, 1)::Int
@inline _chkstride1(ok::Bool, A, B...) = _chkstride1(ok & (stride1(A) == 1), B...)

"""
LinAlg.checksquare(A)
LinearAlgebra.checksquare(A)
Check that a matrix is square, then return its common dimension.
For multiple arguments, return a vector.
@@ -206,7 +208,7 @@ For multiple arguments, return a vector.
```jldoctest
julia> A = fill(1, (4,4)); B = fill(1, (5,5));
julia> LinAlg.checksquare(A, B)
julia> LinearAlgebra.checksquare(A, B)
2-element Array{Int64,1}:
4
5
@@ -306,7 +308,22 @@ include("special.jl")
include("bitarray.jl")
include("ldlt.jl")
include("schur.jl")
include("deprecated.jl")

const ⋅ = dot
const × = cross
export ⋅, ×


function versioninfo(io::IO=STDOUT)
if Base.libblas_name == "libopenblas" || BLAS.vendor() == :openblas || BLAS.vendor() == :openblas64
openblas_config = BLAS.openblas_get_config()
println(io, "BLAS: libopenblas (", openblas_config, ")")
else
println(io, "BLAS: ",Base.libblas_name)
end
println(io, "LAPACK: ",Base.liblapack_name)
end

function __init__()
try
@@ -316,8 +333,8 @@ function __init__()
end
catch ex
Base.showerror_nostdio(ex,
"WARNING: Error during initialization of module LinAlg")
"WARNING: Error during initialization of module LinearAlgebra")
end
end

end # module LinAlg
end # module LinearAlgebra
File renamed without changes.
File renamed without changes.
File renamed without changes.
5 changes: 3 additions & 2 deletions base/linalg/blas.jl → stdlib/LinearAlgebra/src/blas.jl
@@ -63,7 +63,8 @@ export
const libblas = Base.libblas_name
const liblapack = Base.liblapack_name

import ..LinAlg: BlasReal, BlasComplex, BlasFloat, BlasInt, DimensionMismatch, checksquare, stride1, chkstride1, axpy!
import LinearAlgebra
import LinearAlgebra: BlasReal, BlasComplex, BlasFloat, BlasInt, DimensionMismatch, checksquare, stride1, chkstride1, axpy!

import Libdl

@@ -149,7 +150,7 @@ function check()
#
# Check if BlasInt is the expected bitsize, by triggering an error
#
(_, info) = LinAlg.LAPACK.potrf!('U', [1.0 0.0; 0.0 -1.0])
(_, info) = LinearAlgebra.LAPACK.potrf!('U', [1.0 0.0; 0.0 -1.0])
if info != 2 # mangled info code
if info == 2^33
error("BLAS and LAPACK are compiled with 32-bit integer support, but Julia expects 64-bit integers. Please build Julia with USE_BLAS64=0.")
Original file line number Diff line number Diff line change
@@ -59,13 +59,13 @@ julia> A = [1 2; 2 3]
2 3
julia> bkfact(A)
Base.LinAlg.BunchKaufman{Float64,Array{Float64,2}}
LinearAlgebra.BunchKaufman{Float64,Array{Float64,2}}
D factor:
2×2 Tridiagonal{Float64,Array{Float64,1}}:
-0.333333 0.0
0.0 3.0
U factor:
2×2 Base.LinAlg.UnitUpperTriangular{Float64,Array{Float64,2}}:
2×2 LinearAlgebra.UnitUpperTriangular{Float64,Array{Float64,2}}:
1.0 0.666667
0.0 1.0
permutation:
@@ -135,14 +135,14 @@ julia> A = [1 2 3; 2 1 2; 3 2 1]
3 2 1
julia> F = bkfact(Symmetric(A, :L))
Base.LinAlg.BunchKaufman{Float64,Array{Float64,2}}
LinearAlgebra.BunchKaufman{Float64,Array{Float64,2}}
D factor:
3×3 Tridiagonal{Float64,Array{Float64,1}}:
1.0 3.0 ⋅
3.0 1.0 0.0
⋅ 0.0 -1.0
L factor:
3×3 Base.LinAlg.UnitLowerTriangular{Float64,Array{Float64,2}}:
3×3 LinearAlgebra.UnitLowerTriangular{Float64,Array{Float64,2}}:
1.0 0.0 0.0
0.0 1.0 0.0
0.5 0.5 1.0
Original file line number Diff line number Diff line change
@@ -135,7 +135,7 @@ end
# chol!. Destructive methods for computing Cholesky factor of real symmetric or Hermitian
# matrix
function chol!(A::RealHermSymComplexHerm{<:Real,<:StridedMatrix})
C, info = _chol!(A.uplo == 'U' ? A.data : LinAlg.copytri!(A.data, 'L', true), UpperTriangular)
C, info = _chol!(A.uplo == 'U' ? A.data : LinearAlgebra.copytri!(A.data, 'L', true), UpperTriangular)
@assertposdef C info
end
function chol!(A::StridedMatrix)
@@ -153,7 +153,7 @@ function chol(A::RealHermSymComplexHerm)
if A.uplo == 'U'
copyto!(AA, A.data)
else
Base.adjoint!(AA, A.data)
adjoint!(AA, A.data)
end
chol!(Hermitian(AA, :U))
end
@@ -298,7 +298,7 @@ julia> A = [4. 12. -16.; 12. 37. -43.; -16. -43. 98.]
-16.0 -43.0 98.0
julia> C = cholfact(A)
Base.LinAlg.Cholesky{Float64,Array{Float64,2}}
LinearAlgebra.Cholesky{Float64,Array{Float64,2}}
U factor:
3×3 UpperTriangular{Float64,Array{Float64,2}}:
2.0 6.0 -8.0
@@ -616,7 +616,7 @@ function lowrankdowndate!(C::Cholesky, v::StridedVector)
s = conj(v[i]/Aii)
s2 = abs2(s)
if s2 > 1
throw(LinAlg.PosDefException(i))
throw(LinearAlgebra.PosDefException(i))
end
c = sqrt(1 - abs2(s))

File renamed without changes.
File renamed without changes.
1,264 changes: 1,264 additions & 0 deletions stdlib/LinearAlgebra/src/deprecated.jl

Large diffs are not rendered by default.

File renamed without changes.
2 changes: 1 addition & 1 deletion base/linalg/eigen.jl → stdlib/LinearAlgebra/src/eigen.jl
@@ -75,7 +75,7 @@ make rows and columns more equal in norm. The default is `true` for both options
# Examples
```jldoctest
julia> F = eigfact([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0])
Base.LinAlg.Eigen{Float64,Float64,Array{Float64,2},Array{Float64,1}}([1.0, 3.0, 18.0], [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0])
LinearAlgebra.Eigen{Float64,Float64,Array{Float64,2},Array{Float64,1}}([1.0, 3.0, 18.0], [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0])
julia> F.values
3-element Array{Float64,1}:
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -24,12 +24,12 @@ Test that a factorization of a matrix succeeded.
```jldoctest
julia> F = cholfact([1 0; 0 1]);
julia> LinAlg.issuccess(F)
julia> LinearAlgebra.issuccess(F)
true
julia> F = lufact([1 0; 0 0]);
julia> LinAlg.issuccess(F)
julia> LinearAlgebra.issuccess(F)
false
```
"""
Original file line number Diff line number Diff line change
@@ -1205,7 +1205,7 @@ scale!(b::AbstractVector, A::AbstractMatrix) = scale!(A,b,A)
peakflops(n::Integer=2000; parallel::Bool=false)
`peakflops` computes the peak flop rate of the computer by using double precision
[`gemm!`](@ref Base.LinAlg.BLAS.gemm!). By default, if no arguments are specified, it
[`gemm!`](@ref LinearAlgebra.BLAS.gemm!). By default, if no arguments are specified, it
multiplies a matrix of size `n x n`, where `n = 2000`. If the underlying BLAS is using
multiple threads, higher flop rates are realized. The number of BLAS threads can be set with
[`BLAS.set_num_threads(n)`](@ref).
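
A usage sketch (the smaller problem size is an arbitrary choice, not from the docstring):

```julia
using LinearAlgebra

LinearAlgebra.peakflops(1000)   # flop-rate estimate from a 1000×1000 double-precision gemm!
```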
8 changes: 4 additions & 4 deletions base/linalg/givens.jl → stdlib/LinearAlgebra/src/givens.jl
@@ -17,7 +17,7 @@ function _absvecormat_mul_adjrot(A::AbstractVecOrMat{T}, adjR::Adjoint{<:Any,<:A
mul!(TS == T ? copy(A) : convert(AbstractArray{TS}, A), adjoint(convert(AbstractRotation{TS}, R)))
end
"""
LinAlg.Givens(i1,i2,c,s) -> G
LinearAlgebra.Givens(i1,i2,c,s) -> G
A Givens rotation linear operator. The fields `c` and `s` represent the cosine and sine of
the rotation angle, respectively. The `Givens` type supports left multiplication `G*A` and
@@ -258,7 +258,7 @@ y[i1] = r
y[i2] = 0
```
See also: [`LinAlg.Givens`](@ref)
See also: [`LinearAlgebra.Givens`](@ref)
"""
function givens(f::T, g::T, i1::Integer, i2::Integer) where T
if i1 == i2
@@ -284,7 +284,7 @@ B[i1,j] = r
B[i2,j] = 0
```
See also: [`LinAlg.Givens`](@ref)
See also: [`LinearAlgebra.Givens`](@ref)
"""
givens(A::AbstractMatrix, i1::Integer, i2::Integer, j::Integer) =
givens(A[i1,j], A[i2,j],i1,i2)
@@ -303,7 +303,7 @@ B[i1] = r
B[i2] = 0
```
See also: [`LinAlg.Givens`](@ref)
See also: [`LinearAlgebra.Givens`](@ref)
"""
givens(x::AbstractVector, i1::Integer, i2::Integer) =
givens(x[i1], x[i2], i1, i2)
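
A sketch of the behavior described in the docstrings above: `givens(f, g, i1, i2)` returns the rotation `G` and the value `r` that replaces the `i1` entry, while the `i2` entry is rotated to zero.

```julia
using LinearAlgebra

x = [3.0, 4.0]
G, r = LinearAlgebra.givens(x[1], x[2], 1, 2)
G * x ≈ [r, 0.0]     # true: the second entry is annihilated
abs(r) ≈ norm(x)     # true: rotations preserve the 2-norm
```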
File renamed without changes.
6 changes: 4 additions & 2 deletions base/linalg/lapack.jl → stdlib/LinearAlgebra/src/lapack.jl
@@ -7,11 +7,13 @@ Interfaces to LAPACK subroutines.

const liblapack = Base.liblapack_name

import ..LinAlg.BLAS.@blasfunc
import ..LinearAlgebra.BLAS.@blasfunc

import ..LinAlg: BlasFloat, Char, BlasInt, LAPACKException,
import ..LinearAlgebra: BlasFloat, Char, BlasInt, LAPACKException,
DimensionMismatch, SingularException, PosDefException, chkstride1, checksquare

using ..LinearAlgebra: triu, dot

using Base: iszero

#Generic LAPACK error handlers
File renamed without changes.
File renamed without changes.
6 changes: 3 additions & 3 deletions base/linalg/lu.jl → stdlib/LinearAlgebra/src/lu.jl
@@ -43,7 +43,7 @@ julia> A = [4. 3.; 6. 3.]
4.0 3.0
julia> F = lufact!(A)
Base.LinAlg.LU{Float64,Array{Float64,2}}
LinearAlgebra.LU{Float64,Array{Float64,2}}
L factor:
2×2 Array{Float64,2}:
1.0 0.0
@@ -162,7 +162,7 @@ julia> A = [4 3; 6 3]
6 3
julia> F = lufact(A)
Base.LinAlg.LU{Float64,Array{Float64,2}}
LinearAlgebra.LU{Float64,Array{Float64,2}}
L factor:
2×2 Array{Float64,2}:
1.0 0.0
@@ -592,7 +592,7 @@ AbstractArray(F::LU) = AbstractMatrix(F)
Matrix(F::LU) = Array(AbstractArray(F))
Array(F::LU) = Matrix(F)

function Tridiagonal(F::Base.LinAlg.LU{T,Tridiagonal{T,V}}) where {T,V}
function Tridiagonal(F::LU{T,Tridiagonal{T,V}}) where {T,V}
n = size(F, 1)

dl = copy(F.factors.dl)
8 changes: 4 additions & 4 deletions base/linalg/matmul.jl → stdlib/LinearAlgebra/src/matmul.jl
@@ -432,15 +432,15 @@ function copyto!(B::AbstractVecOrMat, ir_dest::UnitRange{Int}, jr_dest::UnitRang
if tM == 'N'
copyto!(B, ir_dest, jr_dest, M, ir_src, jr_src)
else
Base.copy_transpose!(B, ir_dest, jr_dest, M, jr_src, ir_src)
LinearAlgebra.copy_transpose!(B, ir_dest, jr_dest, M, jr_src, ir_src)
tM == 'C' && conj!(B)
end
B
end

function copy_transpose!(B::AbstractMatrix, ir_dest::UnitRange{Int}, jr_dest::UnitRange{Int}, tM::Char, M::AbstractVecOrMat, ir_src::UnitRange{Int}, jr_src::UnitRange{Int})
if tM == 'N'
Base.copy_transpose!(B, ir_dest, jr_dest, M, ir_src, jr_src)
LinearAlgebra.copy_transpose!(B, ir_dest, jr_dest, M, ir_src, jr_src)
else
copyto!(B, ir_dest, jr_dest, M, jr_src, ir_src)
tM == 'C' && conj!(B)
@@ -567,7 +567,7 @@ function _generic_matmatmul!(C::AbstractVecOrMat{R}, tA, tB, A::AbstractVecOrMat
z = convert(promote_type(typeof(z1), R), z1)

if mA < tile_size && nA < tile_size && nB < tile_size
Base.copy_transpose!(Atile, 1:nA, 1:mA, tA, A, 1:mA, 1:nA)
copy_transpose!(Atile, 1:nA, 1:mA, tA, A, 1:mA, 1:nA)
copyto!(Btile, 1:mB, 1:nB, tB, B, 1:mB, 1:nB)
for j = 1:nB
boff = (j-1)*tile_size
@@ -593,7 +593,7 @@ function _generic_matmatmul!(C::AbstractVecOrMat{R}, tA, tB, A::AbstractVecOrMat
for kb = 1:tile_size:nA
klim = min(kb+tile_size-1,mB)
klen = klim-kb+1
Base.copy_transpose!(Atile, 1:klen, 1:ilen, tA, A, ib:ilim, kb:klim)
copy_transpose!(Atile, 1:klen, 1:ilen, tA, A, ib:ilim, kb:klim)
copyto!(Btile, 1:klen, 1:jlen, tB, B, kb:klim, jb:jlim)
for j=1:jlen
bcoff = (j-1)*tile_size
18 changes: 9 additions & 9 deletions base/linalg/qr.jl → stdlib/LinearAlgebra/src/qr.jl
@@ -184,13 +184,13 @@ function qrfactPivotedUnblocked!(A::StridedMatrix)

# Compute reflector of columns j
x = view(A, j:m, j)
τj = LinAlg.reflector!(x)
τj = LinearAlgebra.reflector!(x)
τ[j] = τj

# Update trailing submatrix with reflector
LinAlg.reflectorApply!(x, τj, view(A, j:m, j+1:n))
LinearAlgebra.reflectorApply!(x, τj, view(A, j:m, j+1:n))
end
return LinAlg.QRPivoted{eltype(A), typeof(A)}(A, τ, piv)
return LinearAlgebra.QRPivoted{eltype(A), typeof(A)}(A, τ, piv)
end

# LAPACK version
@@ -216,7 +216,7 @@ julia> a = [1. 2.; 3. 4.]
3.0 4.0
julia> qrfact!(a)
Base.LinAlg.QRCompactWY{Float64,Array{Float64,2}} with factors Q and R:
LinearAlgebra.QRCompactWY{Float64,Array{Float64,2}} with factors Q and R:
[-0.316228 -0.948683; -0.948683 0.316228]
[-3.16228 -4.42719; 0.0 -0.632456]
@@ -280,7 +280,7 @@ julia> A = [3.0 -6.0; 4.0 -8.0; 0.0 1.0]
0.0 1.0
julia> F = qrfact(A)
Base.LinAlg.QRCompactWY{Float64,Array{Float64,2}} with factors Q and R:
LinearAlgebra.QRCompactWY{Float64,Array{Float64,2}} with factors Q and R:
[-0.6 0.0 0.8; -0.8 0.0 -0.6; 0.0 -1.0 0.0]
[-5.0 10.0; 0.0 -1.0]
@@ -346,7 +346,7 @@ Returns `w`, a unit vector in the direction of `v`, and
`r`, the norm of `v`.
See also [`normalize`](@ref), [`normalize!`](@ref),
and [`LinAlg.qr!`](@ref).
and [`LinearAlgebra.qr!`](@ref).
# Examples
```jldoctest
@@ -374,7 +374,7 @@ function qr(v::AbstractVector)
end

"""
LinAlg.qr!(v::AbstractVector) -> w, r
LinearAlgebra.qr!(v::AbstractVector) -> w, r
Computes the polar decomposition of a vector. Instead of returning a new vector
as `qr(v::AbstractVector)`, this function mutates the input vector `v` in place.
@@ -391,7 +391,7 @@ julia> v = [1.; 2.]
1.0
2.0
julia> w, r = Base.LinAlg.qr!(v)
julia> w, r = LinearAlgebra.qr!(v)
([0.447214, 0.894427], 2.23606797749979)
julia> w === v
@@ -806,7 +806,7 @@ function ldiv!(A::QR{T}, B::StridedMatrix{T}) where T
end
end
end
Base.LinAlg.ldiv!(UpperTriangular(view(R, :, 1:minmn)), view(B, 1:minmn, :))
LinearAlgebra.ldiv!(UpperTriangular(view(R, :, 1:minmn)), view(B, 1:minmn, :))
if n > m # Apply elementary transformation to solution
B[m + 1:mB,1:nB] = zero(T)
for j = 1:nB
File renamed without changes.
12 changes: 6 additions & 6 deletions base/linalg/schur.jl → stdlib/LinearAlgebra/src/schur.jl
@@ -22,7 +22,7 @@ julia> A = [5. 7.; -2. -4.]
-2.0 -4.0
julia> F = schurfact!(A)
Base.LinAlg.Schur{Float64,Array{Float64,2}} with factors T and Z:
LinearAlgebra.Schur{Float64,Array{Float64,2}} with factors T and Z:
[3.0 9.0; 0.0 -2.0]
[0.961524 0.274721; -0.274721 0.961524]
and values:
@@ -34,7 +34,7 @@ julia> A
0.0 -2.0
```
"""
schurfact!(A::StridedMatrix{<:BlasFloat}) = Schur(LinAlg.LAPACK.gees!('V', A)...)
schurfact!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...)

"""
schurfact(A::StridedMatrix) -> F::Schur
@@ -52,7 +52,7 @@ julia> A = [5. 7.; -2. -4.]
-2.0 -4.0
julia> F = schurfact(A)
Base.LinAlg.Schur{Float64,Array{Float64,2}} with factors T and Z:
LinearAlgebra.Schur{Float64,Array{Float64,2}} with factors T and Z:
[3.0 9.0; 0.0 -2.0]
[0.961524 0.274721; -0.274721 0.961524]
and values:
@@ -158,7 +158,7 @@ ordschur(schur::Schur, select::Union{Vector{Bool},BitVector}) =
Same as [`ordschur`](@ref) but overwrites the input arguments.
"""
ordschur!(T::StridedMatrix{Ty}, Z::StridedMatrix{Ty}, select::Union{Vector{Bool},BitVector}) where {Ty<:BlasFloat} =
LinAlg.LAPACK.trsen!(convert(Vector{BlasInt}, select), T, Z)[1:3]
LinearAlgebra.LAPACK.trsen!(convert(Vector{BlasInt}, select), T, Z)[1:3]

"""
ordschur(T::StridedMatrix, Z::StridedMatrix, select::Union{Vector{Bool},BitVector}) -> T::StridedMatrix, Z::StridedMatrix, λ::Vector
@@ -196,7 +196,7 @@ end
Same as [`schurfact`](@ref) but uses the input matrices `A` and `B` as workspace.
"""
schurfact!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} =
GeneralizedSchur(LinAlg.LAPACK.gges!('V', 'V', A, B)...)
GeneralizedSchur(LinearAlgebra.LAPACK.gges!('V', 'V', A, B)...)

"""
schurfact(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur
@@ -246,7 +246,7 @@ Same as [`ordschur`](@ref) but overwrites the factorization the input arguments.
"""
ordschur!(S::StridedMatrix{Ty}, T::StridedMatrix{Ty}, Q::StridedMatrix{Ty},
Z::StridedMatrix{Ty}, select::Union{Vector{Bool},BitVector}) where {Ty<:BlasFloat} =
LinAlg.LAPACK.tgsen!(convert(Vector{BlasInt}, select), S, T, Q, Z)
LinearAlgebra.LAPACK.tgsen!(convert(Vector{BlasInt}, select), S, T, Q, Z)

"""
ordschur(S::StridedMatrix, T::StridedMatrix, Q::StridedMatrix, Z::StridedMatrix, select) -> S::StridedMatrix, T::StridedMatrix, Q::StridedMatrix, Z::StridedMatrix, α::Vector, β::Vector
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -15,7 +15,7 @@ for t in (:LowerTriangular, :UnitLowerTriangular, :UpperTriangular,
$t(A::$t) = A
$t{T}(A::$t{T}) where {T} = A
function $t(A::AbstractMatrix)
Base.LinAlg.checksquare(A)
checksquare(A)
return $t{eltype(A), typeof(A)}(A)
end

File renamed without changes.
Original file line number Diff line number Diff line change
@@ -2,7 +2,6 @@

import Base: copy, adjoint, getindex, show, transpose, one, zero, inv,
hcat, vcat, hvcat
import Base.LinAlg: SingularException

"""
UniformScaling{T<:Number}
@@ -242,7 +241,7 @@ function isapprox(J::UniformScaling, A::AbstractMatrix;
rtol::Real = Base.rtoldefault(promote_leaf_eltypes(A), eltype(J), atol),
nans::Bool = false, norm::Function = vecnorm)
n = checksquare(A)
normJ = norm === Base.norm ? abs(J.λ) :
normJ = norm === LinearAlgebra.norm ? abs(J.λ) :
norm === vecnorm ? abs(J.λ) * sqrt(n) :
norm(Diagonal(fill(J.λ, n)))
return norm(A - J) <= max(atol, rtol * max(norm(A), normJ))
@@ -378,7 +377,7 @@ chol(J::UniformScaling, args...) = ((C, info) = _chol!(J, nothing); @assertposde


## Matrix construction from UniformScaling
Matrix{T}(s::UniformScaling, dims::Dims{2}) where {T} = setindex!(zeros(T, dims), T(s.λ), diagind(dims...))
Matrix{T}(s::UniformScaling, dims::Dims{2}) where {T} = setindex!(Base.zeros(T, dims), T(s.λ), diagind(dims...))
Matrix{T}(s::UniformScaling, m::Integer, n::Integer) where {T} = Matrix{T}(s, Dims((m, n)))
Matrix(s::UniformScaling, m::Integer, n::Integer) = Matrix(s, Dims((m, n)))
Matrix(s::UniformScaling, dims::Dims{2}) = Matrix{eltype(s)}(s, dims)
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

# This file is a part of Julia. License is MIT: https://julialang.org/license
module TestAdjointTranspose

using Test
using Base.LinAlg: Adjoint, Transpose
using SparseArrays
using Test, LinearAlgebra, SparseArrays

@testset "Adjoint and Transpose inner constructor basics" begin
intvec, intmat = [1, 2], [1 2; 3 4]
@@ -448,3 +446,5 @@ end
@test adjoint!(a, b) === a
@test adjoint!(b, a) === b
end

end # module TestAdjointTranspose
22 changes: 13 additions & 9 deletions test/linalg/bidiag.jl → stdlib/LinearAlgebra/test/bidiag.jl
@@ -1,9 +1,11 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
using Base.LinAlg: mul!
import Base.LinAlg: BlasReal, BlasFloat
using SparseArrays
module TestBidiagonal

using Test, LinearAlgebra, SparseArrays, Random
using LinearAlgebra: BlasReal, BlasFloat

include("testutils.jl") # test_approx_eq_modphase

n = 10 #Size of test matrix
srand(1)
@@ -194,7 +196,7 @@ srand(1)
end
x = T \ b
tx = Tfull \ b
@test_throws DimensionMismatch Base.LinAlg.naivesub!(T,Vector{elty}(uninitialized,n+1))
@test_throws DimensionMismatch LinearAlgebra.naivesub!(T,Vector{elty}(uninitialized,n+1))
@test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf))
@testset "Generic Mat-vec ops" begin
@test T*b ≈ Tfull*b
@@ -233,7 +235,7 @@ srand(1)
d2, v2 = eig(map(elty<:Complex ? ComplexF64 : Float64,Tfull))
@test (uplo == :U ? d1 : reverse(d1)) ≈ d2
if elty <: Real
Test.test_approx_eq_modphase(v1, uplo == :U ? v2 : v2[:,n:-1:1])
test_approx_eq_modphase(v1, uplo == :U ? v2 : v2[:,n:-1:1])
end
end
end
@@ -246,8 +248,8 @@ srand(1)
u2, d2, v2 = svd(T)
@test d1 ≈ d2
if elty <: Real
Test.test_approx_eq_modphase(u1, u2)
Test.test_approx_eq_modphase(v1, v2)
test_approx_eq_modphase(u1, u2)
test_approx_eq_modphase(v1, v2)
end
@test 0 ≈ vecnorm(u2*Diagonal(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),vecnorm(u1*Diagonal(d1)*v1'-Tfull))
@inferred svdvals(T)
@@ -303,7 +305,7 @@ end
@test promote(C,A) isa Tuple{Tridiagonal, Tridiagonal}
end

using Base.LinAlg: fillstored!, UnitLowerTriangular
using LinearAlgebra: fillstored!, UnitLowerTriangular
@testset "fill! and fillstored!" begin
let # fillstored!
A = Tridiagonal(randn(2), randn(3), randn(2))
@@ -362,3 +364,5 @@ end
@test promote_type(Tridiagonal{Tuple{T}} where T<:Integer, Bidiagonal{Tuple{S}} where S<:Integer) <: Tridiagonal
@test promote_type(Tridiagonal{Tuple{T}} where T<:Integer, Bidiagonal{Int}) <: Tridiagonal
end

end # module TestBidiagonal
29 changes: 16 additions & 13 deletions test/linalg/blas.jl → stdlib/LinearAlgebra/test/blas.jl
@@ -1,8 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Random
module TestBLAS

import Base.LinAlg, Base.LinAlg.BlasReal, Base.LinAlg.BlasComplex
using Test, LinearAlgebra, Random
using LinearAlgebra: BlasReal, BlasComplex

srand(100)
## BLAS tests - testing the interface code to BLAS routines
@@ -16,10 +17,10 @@ srand(100)
end
U = convert(Array{elty, 2}, U)
V = convert(Array{elty, 2}, V)
@test tril(LinAlg.BLAS.syr2k('L','N',U,V)) ≈ tril(U*transpose(V) + V*transpose(U))
@test triu(LinAlg.BLAS.syr2k('U','N',U,V)) ≈ triu(U*transpose(V) + V*transpose(U))
@test tril(LinAlg.BLAS.syr2k('L','T',U,V)) ≈ tril(transpose(U)*V + transpose(V)*U)
@test triu(LinAlg.BLAS.syr2k('U','T',U,V)) ≈ triu(transpose(U)*V + transpose(V)*U)
@test tril(LinearAlgebra.BLAS.syr2k('L','N',U,V)) ≈ tril(U*transpose(V) + V*transpose(U))
@test triu(LinearAlgebra.BLAS.syr2k('U','N',U,V)) ≈ triu(U*transpose(V) + V*transpose(U))
@test tril(LinearAlgebra.BLAS.syr2k('L','T',U,V)) ≈ tril(transpose(U)*V + transpose(V)*U)
@test triu(LinearAlgebra.BLAS.syr2k('U','T',U,V)) ≈ triu(transpose(U)*V + transpose(V)*U)
end

if elty in (ComplexF32, ComplexF64)
@@ -30,10 +31,10 @@ srand(100)
V = complex.(V, V)
U = convert(Array{elty, 2}, U)
V = convert(Array{elty, 2}, V)
@test tril(LinAlg.BLAS.her2k('L','N',U,V)) ≈ tril(U*V' + V*U')
@test triu(LinAlg.BLAS.her2k('U','N',U,V)) ≈ triu(U*V' + V*U')
@test tril(LinAlg.BLAS.her2k('L','C',U,V)) ≈ tril(U'*V + V'*U)
@test triu(LinAlg.BLAS.her2k('U','C',U,V)) ≈ triu(U'*V + V'*U)
@test tril(LinearAlgebra.BLAS.her2k('L','N',U,V)) ≈ tril(U*V' + V*U')
@test triu(LinearAlgebra.BLAS.her2k('U','N',U,V)) ≈ triu(U*V' + V*U')
@test tril(LinearAlgebra.BLAS.her2k('L','C',U,V)) ≈ tril(U'*V + V'*U)
@test triu(LinearAlgebra.BLAS.her2k('U','C',U,V)) ≈ triu(U'*V + V'*U)
end
end

@@ -264,7 +265,7 @@ srand(100)
@test all(BLAS.gemm('N', 'N', el2, I4, I4) .== el2 * I4)
@test all(BLAS.gemm('N', 'T', el2, I4, I4) .== el2 * I4)
@test all(BLAS.gemm('T', 'N', el2, I4, I4) .== el2 * I4)
@test all(LinAlg.BLAS.gemm('T', 'T', el2, I4, I4) .== el2 * I4)
@test all(LinearAlgebra.BLAS.gemm('T', 'T', el2, I4, I4) .== el2 * I4)
I4cp = copy(I4)
@test all(BLAS.gemm!('N', 'N', one(elty), I4, I4, elm1, I4cp) .== Z4)
@test all(I4cp .== Z4)
@@ -297,7 +298,7 @@ srand(100)
@test all(tril(BLAS.herk('L', 'C', L4)) .== tril(BLAS.gemm('T', 'N', L4, L4)))
ans = similar(L4)
@test all(tril(BLAS.herk('L','C', L4)) .== tril(BLAS.herk!('L', 'C', real(one(elty)), L4, real(zero(elty)), ans)))
@test all(Base.LinAlg.copytri!(ans, 'L') .== LinAlg.BLAS.gemm('T', 'N', L4, L4))
@test all(LinearAlgebra.copytri!(ans, 'L') .== LinearAlgebra.BLAS.gemm('T', 'N', L4, L4))
@test_throws DimensionMismatch BLAS.herk!('L','N',real(one(elty)),Matrix{elty}(I, 5, 5),real(one(elty)), Matrix{elty}(I, 6, 6))
else
@test all(triu(BLAS.syrk('U', 'N', U4)) .== triu(BLAS.gemm('N', 'T', U4, U4)))
@@ -310,7 +311,7 @@ srand(100)
@test all(tril(BLAS.syrk('L', 'T', L4)) .== tril(BLAS.gemm('T', 'N', L4, L4)))
ans = similar(L4)
@test all(tril(BLAS.syrk('L','T', L4)) .== tril(BLAS.syrk!('L', 'T', one(elty), L4, zero(elty), ans)))
@test all(Base.LinAlg.copytri!(ans, 'L') .== BLAS.gemm('T', 'N', L4, L4))
@test all(LinearAlgebra.copytri!(ans, 'L') .== BLAS.gemm('T', 'N', L4, L4))
@test_throws DimensionMismatch BLAS.syrk!('L','N',one(elty), Matrix{elty}(I, 5, 5),one(elty), Matrix{elty}(I, 6, 6))
end
end
@@ -440,3 +441,5 @@ Base.stride(A::WrappedArray, i::Int) = stride(A.A, i)
@test C == WrappedArray([63 138+38im; 35+27im 352])
end
end

end # module TestBLAS
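
With `LinAlg`, `BLAS`, and `LAPACK` no longer exported from `Base`, the low-level wrappers are reached through `LinearAlgebra.BLAS`. A hedged sketch of the rank-2k update call exercised above (the 4×4 random matrices are arbitrary):

```julia
using LinearAlgebra

U = rand(4, 4)
V = rand(4, 4)

# syr2k('L','N',U,V) fills the lower triangle of U*transpose(V) + V*transpose(U)
C = LinearAlgebra.BLAS.syr2k('L', 'N', U, V)
@assert tril(C) ≈ tril(U * transpose(V) + V * transpose(U))
```
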
@@ -1,9 +1,10 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestBunchKaufman

using Test, LinearAlgebra, Random
using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted
using Base: getproperty
using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted

n = 10

@@ -34,11 +35,11 @@ bimg = randn(n,2)/2
ε = εa = eps(abs(float(one(eltya))))

# check that factorize gives a Bunch-Kaufman
@test isa(factorize(asym), LinAlg.BunchKaufman)
@test isa(factorize(aher), LinAlg.BunchKaufman)
@test isa(factorize(asym), LinearAlgebra.BunchKaufman)
@test isa(factorize(aher), LinearAlgebra.BunchKaufman)
@testset "$uplo Bunch-Kaufman factor of indefinite matrix" for uplo in (:L, :U)
bc1 = bkfact(Hermitian(aher, uplo))
@test LinAlg.issuccess(bc1)
@test LinearAlgebra.issuccess(bc1)
@test logabsdet(bc1)[1] ≈ log(abs(det(bc1)))
if eltya <: Real
@test logabsdet(bc1)[2] == sign(det(bc1))
@@ -87,7 +88,7 @@ bimg = randn(n,2)/2
@testset "$uplo Bunch-Kaufman factors of a pos-def matrix" for uplo in (:U, :L)
@testset "rook pivoting: $rook" for rook in (false, true)
bc2 = bkfact(Hermitian(apd, uplo), rook)
@test LinAlg.issuccess(bc2)
@test LinearAlgebra.issuccess(bc2)
bks = split(sprint(show, "text/plain", bc2), "\n")
@test bks[1] == summary(bc2)
@test bks[2] == "D factor:"
@@ -114,13 +115,13 @@ bimg = randn(n,2)/2
@testset "$uplo Bunch-Kaufman factors of a singular matrix" for uplo in (:L, :U)
@testset for rook in (false, true)
F = bkfact(issymmetric(As) ? Symmetric(As, uplo) : Hermitian(As, uplo), rook)
@test !LinAlg.issuccess(F)
@test !LinearAlgebra.issuccess(F)
# test printing of this as well!
bks = sprint(show, "text/plain", F)
@test bks == "Failed factorization of type $(typeof(F))"
@test det(F) == 0
@test_throws LinAlg.SingularException inv(F)
@test_throws LinAlg.SingularException F \ fill(1., size(As,1))
@test_throws LinearAlgebra.SingularException inv(F)
@test_throws LinearAlgebra.SingularException F \ fill(1., size(As,1))
end
end
end
@@ -139,3 +140,5 @@ end

@test_throws DomainError logdet(bkfact([-1 -1; -1 1]))
@test logabsdet(bkfact([8 4; 4 2]))[1] == -Inf

end # module TestBunchKaufman
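
The success check used throughout these tests also moved: `LinAlg.issuccess` becomes `LinearAlgebra.issuccess`. A small sketch of the Bunch-Kaufman solve pattern (the 3×3 symmetric matrix is just an example, and `bkfact` is the pre-1.0 name this commit keeps):

```julia
using LinearAlgebra

b = [1.0, 2.0, 3.0]
A = Symmetric([4.0 1.0 0.0; 1.0 3.0 1.0; 0.0 1.0 2.0])

F = bkfact(A)                       # Bunch-Kaufman factorization
@assert LinearAlgebra.issuccess(F)  # would be false for singular input
x = F \ b                           # solve A*x = b through the factorization
@assert A * x ≈ b
```
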
37 changes: 20 additions & 17 deletions test/linalg/cholesky.jl → stdlib/LinearAlgebra/test/cholesky.jl
@@ -1,8 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestCholesky

using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted, PosDefException
using Test, LinearAlgebra, Random
using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted, PosDefException

function unary_ops_tests(a, ca, tol; n=size(a, 1))
@test inv(ca)*a ≈ Matrix(I, n, n)
@@ -65,7 +66,7 @@ end
@testset "throw for non-square input" begin
A = rand(eltya, 2, 3)
@test_throws DimensionMismatch chol(A)
@test_throws DimensionMismatch Base.LinAlg.chol!(A)
@test_throws DimensionMismatch LinearAlgebra.chol!(A)
@test_throws DimensionMismatch cholfact(A)
@test_throws DimensionMismatch cholfact!(A)
end
@@ -83,7 +84,7 @@ end
for i=1:n, j=1:n
@test E[i,j] <= (n+1)ε/(1-(n+1)ε)*real(sqrt(apd[i,i]*apd[j,j]))
end
@test LinAlg.issuccess(capd)
@test LinearAlgebra.issuccess(capd)
@inferred(logdet(capd))

apos = apd[1,1] # test chol(x::Number), needs x>0
@@ -129,7 +130,7 @@ end
#pivoted upper Cholesky
if eltya != BigFloat
cz = cholfact(Hermitian(zeros(eltya,n,n)), Val(true))
@test_throws Base.LinAlg.RankDeficientException Base.LinAlg.chkfullrank(cz)
@test_throws LinearAlgebra.RankDeficientException LinearAlgebra.chkfullrank(cz)
cpapd = cholfact(apdh, Val(true))
unary_ops_tests(apdh, cpapd, ε*κ*n)
@test rank(cpapd) == n
@@ -177,7 +178,7 @@ end
A = eltya[1 2; 2 1]; B = eltya[1, 1]
C = cholfact(A)
@test !isposdef(C)
@test !LinAlg.issuccess(C)
@test !LinearAlgebra.issuccess(C)
Cstr = sprint((t, s) -> show(t, "text/plain", s), C)
@test Cstr == "Failed factorization of type $(typeof(C))"
@test_throws PosDefException C\B
@@ -193,17 +194,17 @@ end
A = randn(5,5)
end
A = convert(Matrix{eltya}, A'A)
@test Matrix(cholfact(A).L) ≈ Matrix(invoke(Base.LinAlg._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1])
@test Matrix(cholfact(A).U) ≈ Matrix(invoke(Base.LinAlg._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1])
@test Matrix(cholfact(A).L) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1])
@test Matrix(cholfact(A).U) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1])
end
end
end
end

@testset "Cholesky factor of Matrix with non-commutative elements, here 2x2-matrices" begin
X = Matrix{Float64}[0.1*rand(2,2) for i in 1:3, j = 1:3]
L = Matrix(Base.LinAlg._chol!(X*X', LowerTriangular)[1])
U = Matrix(Base.LinAlg._chol!(X*X', UpperTriangular)[1])
L = Matrix(LinearAlgebra._chol!(X*X', LowerTriangular)[1])
U = Matrix(LinearAlgebra._chol!(X*X', UpperTriangular)[1])
XX = Matrix(X*X')

@test sum(sum(norm, L*L' - XX)) < eps()
@@ -220,10 +221,10 @@ end
BcB = (BcB + BcB')/2
F = cholfact(Hermitian(AcA, uplo))
G = cholfact(Hermitian(BcB, uplo))
@test Base.getproperty(LinAlg.lowrankupdate(F, v), uplo) ≈ Base.getproperty(G, uplo)
@test_throws DimensionMismatch LinAlg.lowrankupdate(F, Vector{eltype(v)}(uninitialized,length(v)+1))
@test Base.getproperty(LinAlg.lowrankdowndate(G, v), uplo) ≈ Base.getproperty(F, uplo)
@test_throws DimensionMismatch LinAlg.lowrankdowndate(G, Vector{eltype(v)}(uninitialized,length(v)+1))
@test Base.getproperty(LinearAlgebra.lowrankupdate(F, v), uplo) ≈ Base.getproperty(G, uplo)
@test_throws DimensionMismatch LinearAlgebra.lowrankupdate(F, Vector{eltype(v)}(uninitialized,length(v)+1))
@test Base.getproperty(LinearAlgebra.lowrankdowndate(G, v), uplo) ≈ Base.getproperty(F, uplo)
@test_throws DimensionMismatch LinearAlgebra.lowrankdowndate(G, Vector{eltype(v)}(uninitialized,length(v)+1))
end
end

@@ -262,13 +263,15 @@ end
R = randn(5, 5)
C = complex.(R, R)
for A in (R, C)
@test !LinAlg.issuccess(cholfact(A))
@test !LinAlg.issuccess(cholfact!(copy(A)))
@test !LinearAlgebra.issuccess(cholfact(A))
@test !LinearAlgebra.issuccess(cholfact!(copy(A)))
@test_throws PosDefException chol(A)
@test_throws PosDefException Base.LinAlg.chol!(copy(A))
@test_throws PosDefException LinearAlgebra.chol!(copy(A))
end
end

@testset "fail for non-BLAS element types" begin
@test_throws ArgumentError cholfact!(Hermitian(rand(Float16, 5,5)), Val(true))
end

end # module TestCholesky
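
`cholfact` stays exported, while failure handling goes through `LinearAlgebra.issuccess` and `PosDefException` as the tests above show. A short sketch on two small symmetric matrices (values chosen only to make one positive definite and one indefinite):

```julia
using LinearAlgebra

Apd  = [2.0 1.0; 1.0 2.0]          # symmetric positive definite
Aind = [1.0 2.0; 2.0 1.0]          # symmetric but indefinite

F = cholfact(Apd)
@assert LinearAlgebra.issuccess(F)
@assert F.U' * F.U ≈ Apd           # A == U'U for the upper factor

G = cholfact(Aind)                 # does not throw; the failure is recorded
@assert !LinearAlgebra.issuccess(G)
```
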
19 changes: 11 additions & 8 deletions test/linalg/dense.jl → stdlib/LinearAlgebra/test/dense.jl
@@ -1,13 +1,14 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestDense

using Test, LinearAlgebra, Random
using LinearAlgebra: BlasComplex, BlasFloat, BlasReal

@testset "Check that non-floats are correctly promoted" begin
@test [1 0 0; 0 1 0]\[1,1] ≈ [1;1;0]
end

using Base.LinAlg: BlasComplex, BlasFloat, BlasReal

n = 10

# Split n into 2 parts for tests needing two matrices
@@ -164,7 +165,7 @@ end
@test norm(x, 2) ≈ sqrt(10)
@test norm(x, 3) ≈ cbrt(10)
@test norm(x, Inf) ≈ 1
if elty <: Base.LinAlg.BlasFloat
if elty <: LinearAlgebra.BlasFloat
@test norm(x, 1:4) ≈ 2
@test_throws BoundsError norm(x,-1:4)
@test_throws BoundsError norm(x,1:11)
@@ -690,13 +691,13 @@ end

#Ah : Hermitian Matrix
Ah = convert(Matrix{elty}, [3 1; 1 3])
if elty <: Base.LinAlg.BlasComplex
if elty <: LinearAlgebra.BlasComplex
Ah += [0 im; -im 0]
end

#ADi : Diagonal Matrix
ADi = convert(Matrix{elty}, [3 0; 0 3])
if elty <: Base.LinAlg.BlasComplex
if elty <: LinearAlgebra.BlasComplex
ADi += [im 0; 0 im]
end

@@ -823,8 +824,8 @@ end
A = rand(10,10)
B = view(A, 2:2:10, 2:2:10)

@test Base.LinAlg.stride1(a) == 1
@test Base.LinAlg.stride1(b) == 2
@test LinearAlgebra.stride1(a) == 1
@test LinearAlgebra.stride1(b) == 2

@test strides(a) == (1,)
@test strides(b) == (2,)
@@ -843,3 +844,5 @@ end
end
end
end

end # module TestDense
@@ -1,9 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
using Base.LinAlg: mul!, ldiv!, rdiv!
import Base.LinAlg: BlasFloat, BlasComplex, SingularException
using SparseArrays
module TestDiagonal

using Test, LinearAlgebra, SparseArrays, Random
using LinearAlgebra: mul!, ldiv!, rdiv!, BlasFloat, BlasComplex, SingularException

n=12 #Size of matrix problem to test
srand(1)
@@ -336,7 +336,7 @@ end
@test Matrix(1.0I, 5, 5) \ Diagonal(fill(1.,5)) == Matrix(I, 5, 5)

@testset "Triangular and Diagonal" begin
for T in (LowerTriangular(randn(5,5)), LinAlg.UnitLowerTriangular(randn(5,5)))
for T in (LowerTriangular(randn(5,5)), LinearAlgebra.UnitLowerTriangular(randn(5,5)))
D = Diagonal(randn(5))
@test T*D == Array(T)*Array(D)
@test T'D == Array(T)'*Array(D)
@@ -427,3 +427,5 @@ end
@test Diagonal(adjoint([1, 2, 3])) == Diagonal([1 2 3])
@test Diagonal(transpose([1, 2, 3])) == Diagonal([1 2 3])
end

end # module TestDiagonal
7 changes: 5 additions & 2 deletions test/linalg/eigen.jl → stdlib/LinearAlgebra/test/eigen.jl
@@ -1,8 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestEigen

using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted
using Test, LinearAlgebra, Random
using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted

n = 10

@@ -120,3 +121,5 @@ end
@test λ ≈ [0.5, 2/3]
end
end

end # module TestEigen
134 changes: 69 additions & 65 deletions test/linalg/generic.jl → stdlib/LinearAlgebra/test/generic.jl
@@ -1,7 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

module TestGeneric

using Test, LinearAlgebra, Random
import Base: -, *, /, \
using Test, Random

# A custom Quaternion type with minimal defined interface and methods.
# Used to test scale and scale! methods to show non-commutativity.
@@ -102,52 +104,52 @@ end
@test_throws ArgumentError diff(X,-1)
end

@testset "linrange" begin
# make sure unequal input arrays throw an error
x = [2; 5; 6]
y = [3; 7; 10; 10]
@test_throws DimensionMismatch linreg(x, y)
x = [2 5 6]
y = [3; 7; 10]
@test_throws MethodError linreg(x, y)

# check (UnitRange, Array)
x = 1:12
y = [5.5; 6.3; 7.6; 8.8; 10.9; 11.79; 13.48; 15.02; 17.77; 20.81; 22.0; 22.99]
@test [linreg(x,y)...] ≈ [2.5559090909090867, 1.6960139860139862]
@test [linreg(view(x,1:6),view(y,1:6))...] ≈ [3.8366666666666642,1.3271428571428574]

# check (LinSpace, UnitRange)
x = linspace(1.0, 12.0, 100)
y = -100:-1
@test [linreg(x, y)...] ≈ [-109.0, 9.0]

# check (UnitRange, UnitRange)
x = 1:12
y = 12:-1:1
@test [linreg(x, y)...] ≈ [13.0, -1.0]

# check (LinSpace, LinSpace)
x = linspace(-5, 10, 100)
y = linspace(50, 200, 100)
@test [linreg(x, y)...] ≈ [100.0, 10.0]

# check (Array, Array)
# Anscombe's quartet (https://en.wikipedia.org/wiki/Anscombe%27s_quartet)
x123 = [10.0; 8.0; 13.0; 9.0; 11.0; 14.0; 6.0; 4.0; 12.0; 7.0; 5.0]
y1 = [8.04; 6.95; 7.58; 8.81; 8.33; 9.96; 7.24; 4.26; 10.84; 4.82; 5.68]
@test [linreg(x123,y1)...] ≈ [3.0,0.5] atol=15e-5

y2 = [9.14; 8.14; 8.74; 8.77; 9.26; 8.10; 6.12; 3.10; 9.13; 7.26; 4.74]
@test [linreg(x123,y2)...] ≈ [3.0,0.5] atol=10e-3

y3 = [7.46; 6.77; 12.74; 7.11; 7.81; 8.84; 6.08; 5.39; 8.15; 6.42; 5.73]
@test [linreg(x123,y3)...] ≈ [3.0,0.5] atol=10e-3

x4 = [8.0; 8.0; 8.0; 8.0; 8.0; 8.0; 8.0; 19.0; 8.0; 8.0; 8.0]
y4 = [6.58; 5.76; 7.71; 8.84; 8.47; 7.04; 5.25; 12.50; 5.56; 7.91; 6.89]
@test [linreg(x4,y4)...] ≈ [3.0,0.5] atol=10e-3
end
# @testset "linrange" begin
# # make sure unequal input arrays throw an error
# x = [2; 5; 6]
# y = [3; 7; 10; 10]
# @test_throws DimensionMismatch linreg(x, y)
# x = [2 5 6]
# y = [3; 7; 10]
# @test_throws MethodError linreg(x, y)

# # check (UnitRange, Array)
# x = 1:12
# y = [5.5; 6.3; 7.6; 8.8; 10.9; 11.79; 13.48; 15.02; 17.77; 20.81; 22.0; 22.99]
# @test [linreg(x,y)...] ≈ [2.5559090909090867, 1.6960139860139862]
# @test [linreg(view(x,1:6),view(y,1:6))...] ≈ [3.8366666666666642,1.3271428571428574]

# # check (LinSpace, UnitRange)
# x = linspace(1.0, 12.0, 100)
# y = -100:-1
# @test [linreg(x, y)...] ≈ [-109.0, 9.0]

# # check (UnitRange, UnitRange)
# x = 1:12
# y = 12:-1:1
# @test [linreg(x, y)...] ≈ [13.0, -1.0]

# # check (LinSpace, LinSpace)
# x = linspace(-5, 10, 100)
# y = linspace(50, 200, 100)
# @test [linreg(x, y)...] ≈ [100.0, 10.0]

# # check (Array, Array)
# # Anscombe's quartet (https://en.wikipedia.org/wiki/Anscombe%27s_quartet)
# x123 = [10.0; 8.0; 13.0; 9.0; 11.0; 14.0; 6.0; 4.0; 12.0; 7.0; 5.0]
# y1 = [8.04; 6.95; 7.58; 8.81; 8.33; 9.96; 7.24; 4.26; 10.84; 4.82; 5.68]
# @test [linreg(x123,y1)...] ≈ [3.0,0.5] atol=15e-5

# y2 = [9.14; 8.14; 8.74; 8.77; 9.26; 8.10; 6.12; 3.10; 9.13; 7.26; 4.74]
# @test [linreg(x123,y2)...] ≈ [3.0,0.5] atol=10e-3

# y3 = [7.46; 6.77; 12.74; 7.11; 7.81; 8.84; 6.08; 5.39; 8.15; 6.42; 5.73]
# @test [linreg(x123,y3)...] ≈ [3.0,0.5] atol=10e-3

# x4 = [8.0; 8.0; 8.0; 8.0; 8.0; 8.0; 8.0; 19.0; 8.0; 8.0; 8.0]
# y4 = [6.58; 5.76; 7.71; 8.84; 8.47; 7.04; 5.25; 12.50; 5.56; 7.91; 6.89]
# @test [linreg(x4,y4)...] ≈ [3.0,0.5] atol=10e-3
# end

@testset "diag" begin
A = Matrix(1.0I, 4, 4)
@@ -161,12 +163,12 @@ end
x = ['a','b','c','d','e']
y = ['a','b','c','d','e']
α, β = 'f', 'g'
@test_throws DimensionMismatch Base.LinAlg.axpy!(α,x,['g'])
@test_throws DimensionMismatch Base.LinAlg.axpby!(α,x,β,['g'])
@test_throws BoundsError Base.LinAlg.axpy!(α,x,Vector(-1:5),y,Vector(1:7))
@test_throws BoundsError Base.LinAlg.axpy!(α,x,Vector(1:7),y,Vector(-1:5))
@test_throws BoundsError Base.LinAlg.axpy!(α,x,Vector(1:7),y,Vector(1:7))
@test_throws DimensionMismatch Base.LinAlg.axpy!(α,x,Vector(1:3),y,Vector(1:5))
@test_throws DimensionMismatch LinearAlgebra.axpy!(α,x,['g'])
@test_throws DimensionMismatch LinearAlgebra.axpby!(α,x,β,['g'])
@test_throws BoundsError LinearAlgebra.axpy!(α,x,Vector(-1:5),y,Vector(1:7))
@test_throws BoundsError LinearAlgebra.axpy!(α,x,Vector(1:7),y,Vector(-1:5))
@test_throws BoundsError LinearAlgebra.axpy!(α,x,Vector(1:7),y,Vector(1:7))
@test_throws DimensionMismatch LinearAlgebra.axpy!(α,x,Vector(1:3),y,Vector(1:5))
end

@test !issymmetric(fill(1,5,3))
@@ -183,7 +185,7 @@ end
@testset "2-argument version of scale!" begin
@test scale!(copy(a), 5.) == a*5
@test scale!(5., copy(a)) == a*5
b = randn(Base.LinAlg.SCAL_CUTOFF) # make sure we try BLAS path
b = randn(LinearAlgebra.SCAL_CUTOFF) # make sure we try BLAS path
subB = view(b, :, :)
@test scale!(copy(b), 5.) == b*5
@test scale!(copy(subB), 5.) == subB*5
@@ -265,26 +267,26 @@ end
@test norm(x, 3) ≈ cbrt(sqrt(125)+125)
end

@testset "LinAlg.axp(b)y! for element type without commutative multiplication" begin
@testset "LinearAlgebra.axp(b)y! for element type without commutative multiplication" begin
α = [1 2; 3 4]
β = [5 6; 7 8]
x = fill([ 9 10; 11 12], 3)
y = fill([13 14; 15 16], 3)
axpy = LinAlg.axpy!(α, x, deepcopy(y))
axpby = LinAlg.axpby!(α, x, β, deepcopy(y))
axpy = LinearAlgebra.axpy!(α, x, deepcopy(y))
axpby = LinearAlgebra.axpby!(α, x, β, deepcopy(y))
@test axpy == x .* [α] .+ y
@test axpy != [α] .* x .+ y
@test axpby == x .* [α] .+ y .* [β]
@test axpby != [α] .* x .+ [β] .* y
end

@testset "LinAlg.axpy! for x and y of different dimensions" begin
@testset "LinearAlgebra.axpy! for x and y of different dimensions" begin
α = 5
x = 2:5
y = fill(1, 2, 4)
rx = [1 4]
ry = [2 8]
@test LinAlg.axpy!(α, x, rx, y, ry) == [1 1 1 1; 11 1 1 26]
@test LinearAlgebra.axpy!(α, x, rx, y, ry) == [1 1 1 1; 11 1 1 26]
end
@testset "norm and normalize!" begin
vr = [3.0, 4.0]
@@ -316,15 +318,15 @@ end
@test det([true false; false true]) == det(Matrix(1I, 2, 2))
end

@test_throws ArgumentError Base.LinAlg.char_uplo(:Z)
@test_throws ArgumentError LinearAlgebra.char_uplo(:Z)

@testset "Issue 17650" begin
@test [0.01311489462160816, Inf] ≈ [0.013114894621608135, Inf]
end

@testset "Issue 19035" begin
@test Base.LinAlg.promote_leaf_eltypes([1, 2, [3.0, 4.0]]) == Float64
@test Base.LinAlg.promote_leaf_eltypes([[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]) == ComplexF64
@test LinearAlgebra.promote_leaf_eltypes([1, 2, [3.0, 4.0]]) == Float64
@test LinearAlgebra.promote_leaf_eltypes([[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]) == ComplexF64
@test [1, 2, 3] ≈ [1, 2, 3]
@test [[1, 2], [3, 4]] ≈ [[1, 2], [3, 4]]
@test [[1, 2], [3, 4]] ≈ [[1.0-eps(), 2.0+eps()], [3.0+2eps(), 4.0-1e8eps()]]
@@ -351,8 +353,8 @@ Base.one(::Type{ModInt{n}}) where {n} = ModInt{n}(1)
Base.one(::ModInt{n}) where {n} = ModInt{n}(1)
Base.adjoint(a::ModInt{n}) where {n} = ModInt{n}(conj(a))
Base.transpose(a::ModInt{n}) where {n} = a # see Issue 20978
Base.LinAlg.Adjoint(a::ModInt{n}) where {n} = adjoint(a)
Base.LinAlg.Transpose(a::ModInt{n}) where {n} = transpose(a)
LinearAlgebra.Adjoint(a::ModInt{n}) where {n} = adjoint(a)
LinearAlgebra.Transpose(a::ModInt{n}) where {n} = transpose(a)

@testset "Issue 22042" begin
A = [ModInt{2}(1) ModInt{2}(0); ModInt{2}(1) ModInt{2}(1)]
@@ -373,7 +375,7 @@ end
end

@testset "generic functions for checking whether matrices have banded structure" begin
using Base.LinAlg: isbanded
using LinearAlgebra: isbanded
pentadiag = [1 2 3; 4 5 6; 7 8 9]
tridiag = [1 2 0; 4 5 6; 0 8 9]
ubidiag = [1 2 0; 0 5 6; 0 0 9]
@@ -425,3 +427,5 @@ end
@test isdiag(adiag)
end
end

end # module TestGeneric
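
The `linreg` testset above is commented out rather than ported, consistent with `linreg` being on its way out of Base. The intercept/slope pair it returned is an ordinary least-squares fit, so a plain backslash solve covers the same ground; a hedged sketch using one of the old test's data sets (the replacement formula is the standard one, not code from this commit):

```julia
using LinearAlgebra

x = collect(1.0:12.0)
y = collect(12.0:-1.0:1.0)

# Fit y ≈ a + b*x by least squares; this replaces linreg(x, y).
a, b = [ones(length(x)) x] \ y
@assert [a, b] ≈ [13.0, -1.0]      # the values the old linreg test expected
```
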
9 changes: 6 additions & 3 deletions test/linalg/givens.jl → stdlib/LinearAlgebra/test/givens.jl
@@ -1,7 +1,8 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
using Base.LinAlg: mul!
module TestGivens

using Test, LinearAlgebra, Random

# Test givens rotations
@testset for elty in (Float32, Float64, ComplexF32, ComplexF64)
@@ -12,7 +13,7 @@ using Base.LinAlg: mul!
end
@testset for A in (raw_A, view(raw_A, 1:10, 1:10))
Ac = copy(A)
R = Base.LinAlg.Rotation(Base.LinAlg.Givens{elty}[])
R = LinearAlgebra.Rotation(LinearAlgebra.Givens{elty}[])
for j = 1:8
for i = j+2:10
G, _ = givens(A, j+1, i, j)
@@ -66,3 +67,5 @@ using Base.LinAlg: mul!
end
end
end

end # module TestGivens
@@ -1,8 +1,8 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestHessenberg

using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted
using Test, LinearAlgebra, Random

let n = 10
srand(1234321)
@@ -31,3 +31,5 @@ let n = 10
end
end
end

end # module TestHessenberg
41 changes: 21 additions & 20 deletions test/linalg/lapack.jl → stdlib/LinearAlgebra/test/lapack.jl
@@ -1,15 +1,14 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestLAPACK

import Base.LinAlg.BlasInt
using Test, LinearAlgebra, Random
using LinearAlgebra: BlasInt



@test_throws ArgumentError Base.LinAlg.LAPACK.chkuplo('Z')
@test_throws ArgumentError Base.LinAlg.LAPACK.chkside('Z')
@test_throws ArgumentError Base.LinAlg.LAPACK.chkdiag('Z')
@test_throws ArgumentError Base.LinAlg.LAPACK.chktrans('Z')
@test_throws ArgumentError LinearAlgebra.LAPACK.chkuplo('Z')
@test_throws ArgumentError LinearAlgebra.LAPACK.chkside('Z')
@test_throws ArgumentError LinearAlgebra.LAPACK.chkdiag('Z')
@test_throws ArgumentError LinearAlgebra.LAPACK.chktrans('Z')

@testset "syevr" begin
srand(123)
@@ -107,15 +106,15 @@ end
A = diagm(-2 => dl2, -1 => dl, 0 => d, 1 => du)
@test A\C ≈ D
@test_throws DimensionMismatch LAPACK.gbtrs!('N',2,1,6,AB,ipiv,Matrix{elty}(uninitialized,7,6))
@test_throws Base.LinAlg.LAPACKException LAPACK.gbtrf!(2,1,6,zeros(elty,6,6))
@test_throws LinearAlgebra.LAPACKException LAPACK.gbtrf!(2,1,6,zeros(elty,6,6))
end
end


@testset "geqp3, geqrt error handling" begin
@testset for elty in (Float32, Float64, ComplexF32, ComplexF64)
x10, x11 = Vector{elty}.(uninitialized, (10, 11))
y10, y11 = Vector{Base.LinAlg.BlasInt}.(uninitialized, (10, 11))
y10, y11 = Vector{LinearAlgebra.BlasInt}.(uninitialized, (10, 11))
A10x10, A11x10, A10x11, A11x11 = Matrix{elty}.(uninitialized, ((10,10), (11,10), (10,11), (11,11)))
@test_throws DimensionMismatch LAPACK.geqlf!(A10x10, x11)
@test_throws DimensionMismatch LAPACK.gelqf!(A10x10, x11)
@@ -132,7 +131,7 @@ end
@testset "gels, gesv, getrs, getri error handling" begin
@testset for elty in (Float32, Float64, ComplexF32, ComplexF64)
A10x10, B11x11 = Matrix{elty}.(uninitialized, ((10,10), (11,11)))
x10, x11 = Vector{Base.LinAlg.BlasInt}.(uninitialized, (10, 11))
x10, x11 = Vector{LinearAlgebra.BlasInt}.(uninitialized, (10, 11))
@test_throws DimensionMismatch LAPACK.gels!('N',A10x10,B11x11)
@test_throws DimensionMismatch LAPACK.gels!('T',A10x10,B11x11)
@test_throws DimensionMismatch LAPACK.gesv!(A10x10,B11x11)
@@ -563,14 +562,14 @@ end
@testset for elty in (Float32, Float64, ComplexF32, ComplexF64)
T = triu(rand(elty,10,10))
S = copy(T)
select = zeros(Base.LinAlg.BlasInt,10)
select = zeros(LinearAlgebra.BlasInt,10)
select[1] = 1
select,Vr = LAPACK.trevc!('R','S',select,copy(T))
@test Vr ≈ eigvecs(S)[:,1]
select = zeros(Base.LinAlg.BlasInt,10)
select = zeros(LinearAlgebra.BlasInt,10)
select[1] = 1
select,Vl = LAPACK.trevc!('L','S',select,copy(T))
select = zeros(Base.LinAlg.BlasInt,10)
select = zeros(LinearAlgebra.BlasInt,10)
select[1] = 1
select,Vln,Vrn = LAPACK.trevc!('B','S',select,copy(T))
@test Vrn ≈ eigvecs(S)[:,1]
@@ -592,7 +591,7 @@ end
for c in ('V', 'N')
A = convert(Matrix{elty}, [7 2 2 1; 1 5 2 0; 0 3 9 4; 1 1 1 4])
T,Q,d = schur(A)
s, sep = Base.LinAlg.LAPACK.trsen!(job,c,Array{LinAlg.BlasInt}([0,1,0,0]),T,Q)[4:5]
s, sep = LinearAlgebra.LAPACK.trsen!(job,c,Array{LinearAlgebra.BlasInt}([0,1,0,0]),T,Q)[4:5]
@test d[1] ≈ T[2,2]
@test d[2] ≈ T[1,1]
if c == 'V'
@@ -618,7 +617,7 @@ end
for c in ('V', 'N')
A = convert(Matrix{elty}, [7 2 2 1; 1 5 2 0; 0 3 9 4; 1 1 1 4])
T,Q,d = schur(A)
Base.LinAlg.LAPACK.trexc!(c,LinAlg.BlasInt(1),LinAlg.BlasInt(2),T,Q)
LinearAlgebra.LAPACK.trexc!(c,LinearAlgebra.BlasInt(1),LinearAlgebra.BlasInt(2),T,Q)
@test d[1] ≈ T[2,2]
@test d[2] ≈ T[1,1]
if c == 'V'
@@ -638,16 +637,16 @@ end
A = convert(Matrix{elty}, complex.(randn(10,nn),randn(10,nn)))
end ## LU (only equal for real because LAPACK uses different absolute value when choosing permutations)
if elty <: Real
FJulia = Base.LinAlg.generic_lufact!(copy(A))
FLAPACK = Base.LinAlg.LAPACK.getrf!(copy(A))
FJulia = LinearAlgebra.generic_lufact!(copy(A))
FLAPACK = LinearAlgebra.LAPACK.getrf!(copy(A))
@test FJulia.factors ≈ FLAPACK[1]
@test FJulia.ipiv ≈ FLAPACK[2]
@test FJulia.info ≈ FLAPACK[3]
end

## QR
FJulia = LinAlg.qrfactUnblocked!(copy(A))
FLAPACK = Base.LinAlg.LAPACK.geqrf!(copy(A))
FJulia = LinearAlgebra.qrfactUnblocked!(copy(A))
FLAPACK = LinearAlgebra.LAPACK.geqrf!(copy(A))
@test FJulia.factors ≈ FLAPACK[1]
@test FJulia.τ ≈ FLAPACK[2]
end
@@ -663,3 +662,5 @@ end
let A = [NaN NaN; NaN NaN]
@test_throws ArgumentError eigfact(A)
end

end # module TestLAPACK
16 changes: 9 additions & 7 deletions test/linalg/lq.jl → stdlib/LinearAlgebra/test/lq.jl
@@ -1,9 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random

using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, mul!
module TestLQ

using Test, LinearAlgebra, Random
using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, mul!

n = 10

@@ -21,8 +21,8 @@ breal = randn(n,2)/2
bimg = randn(n,2)/2

# helper functions to unambiguously recover explicit forms of an LQPackedQ
squareQ(Q::LinAlg.LQPackedQ) = (n = size(Q.factors, 2); mul!(Q, Matrix{eltype(Q)}(I, n, n)))
rectangularQ(Q::LinAlg.LQPackedQ) = convert(Array, Q)
squareQ(Q::LinearAlgebra.LQPackedQ) = (n = size(Q.factors, 2); mul!(Q, Matrix{eltype(Q)}(I, n, n)))
rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q)

@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64)
a = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal)
@@ -146,7 +146,7 @@ end

@testset "getindex on LQPackedQ (#23733)" begin
local m, n
function getqs(F::Base.LinAlg.LQ)
function getqs(F::LinearAlgebra.LQ)
implicitQ = F.Q
sq = size(implicitQ.factors, 2)
explicitQ = mul!(implicitQ, Matrix{eltype(implicitQ)}(I, sq, sq))
@@ -188,7 +188,7 @@ end
end

@testset "postmultiplication with / right-application of LQPackedQ (#23779)" begin
function getqs(F::Base.LinAlg.LQ)
function getqs(F::LinearAlgebra.LQ)
implicitQ = F.Q
explicitQ = mul!(implicitQ, Matrix{eltype(implicitQ)}(I, size(implicitQ)...))
return implicitQ, explicitQ
@@ -223,3 +223,5 @@ end
@test_throws DimensionMismatch C * adjoint(implicitQ)
@test_throws DimensionMismatch adjoint(C) * adjoint(implicitQ)
end

end # module TestLQ
25 changes: 14 additions & 11 deletions test/linalg/lu.jl → stdlib/LinearAlgebra/test/lu.jl
@@ -1,8 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test
using Base.LinAlg: ldiv!
import Base.LinAlg.BlasInt, Base.LinAlg.BlasFloat
module TestLU

using Test, LinearAlgebra, Random
using LinearAlgebra: ldiv!, BlasInt, BlasFloat

n = 10

@@ -58,7 +59,7 @@ dimg = randn(n)/2
end
@testset "Singular LU" begin
lua = lufact(zeros(eltya, 3, 3))
@test !LinAlg.issuccess(lua)
@test !LinearAlgebra.issuccess(lua)
@test sprint((t, s) -> show(t, "text/plain", s), lua) == "Failed factorization of type $(typeof(lua))"
end
κ = cond(a,1)
@@ -74,7 +75,7 @@ dimg = randn(n)/2
@test copy(lua) == lua
if eltya <: BlasFloat
# test conversion of LU factorization's numerical type
bft = eltya <: Real ? Base.LinAlg.LU{BigFloat} : Base.LinAlg.LU{Complex{BigFloat}}
bft = eltya <: Real ? LinearAlgebra.LU{BigFloat} : LinearAlgebra.LU{Complex{BigFloat}}
bflua = convert(bft, lua)
@test bflua.L*bflua.U ≈ big.(a)[p,:] rtol=ε
end
@@ -86,7 +87,7 @@ dimg = randn(n)/2
κd = cond(Array(d),1)
@testset "Tridiagonal LU" begin
lud = lufact(d)
@test LinAlg.issuccess(lud)
@test LinearAlgebra.issuccess(lud)
@test lufact(lud) == lud
@test_throws ErrorException lud.Z
@test lud.L*lud.U ≈ lud.P*Array(d)
@@ -146,14 +147,14 @@ dimg = randn(n)/2
@test_throws DimensionMismatch lud\f
@test_throws DimensionMismatch transpose(lud)\f
@test_throws DimensionMismatch lud'\f
@test_throws DimensionMismatch Base.LinAlg.ldiv!(transpose(lud), f)
@test_throws DimensionMismatch LinearAlgebra.ldiv!(transpose(lud), f)
let Bs = copy(b)
for bb in (Bs, view(Bs, 1:n, 1))
@test norm(d*(lud\bb) - bb, 1) < ε*κd*n*2 # Two because the right hand side has two columns
if eltya <: Real
@test norm((transpose(lud)\bb) - Array(transpose(d))\bb, 1) < ε*κd*n*2 # Two because the right hand side has two columns
if eltya != Int && eltyb != Int
@test norm(Base.LinAlg.ldiv!(transpose(lud), copy(bb)) - Array(transpose(d))\bb, 1) < ε*κd*n*2
@test norm(LinearAlgebra.ldiv!(transpose(lud), copy(bb)) - Array(transpose(d))\bb, 1) < ε*κd*n*2
end
end
if eltya <: Complex
@@ -173,7 +174,7 @@ dimg = randn(n)/2
du[1] = zero(eltya)
dl[1] = zero(eltya)
zT = Tridiagonal(dl,dd,du)
@test !LinAlg.issuccess(lufact(zT))
@test !LinearAlgebra.issuccess(lufact(zT))
end
end
@testset "Thin LU" begin
@@ -242,15 +243,15 @@ end
end

@testset "Issue 21453" begin
@test_throws ArgumentError LinAlg._cond1Inf(lufact(randn(5,5)), 2, 2.0)
@test_throws ArgumentError LinearAlgebra._cond1Inf(lufact(randn(5,5)), 2, 2.0)
end

@testset "REPL printing" begin
bf = IOBuffer()
show(bf, "text/plain", lufact(Matrix(I, 4, 4)))
seekstart(bf)
@test String(take!(bf)) == """
Base.LinAlg.LU{Float64,Array{Float64,2}}
LinearAlgebra.LU{Float64,Array{Float64,2}}
L factor:
4×4 Array{Float64,2}:
1.0 0.0 0.0 0.0
@@ -264,3 +265,5 @@ U factor:
0.0 0.0 1.0 0.0
0.0 0.0 0.0 1.0"""
end

end # module TestLU
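
The LU tests keep using property access on the factorization object, only with the qualified `LinearAlgebra` names. A brief sketch of the tridiagonal case checked above (the matrix is an arbitrary diagonally dominant example):

```julia
using LinearAlgebra

T = Tridiagonal([1.0, 1.0], [4.0, 4.0, 4.0], [1.0, 1.0])
F = lufact(T)                        # specialized LU for tridiagonal input
@assert LinearAlgebra.issuccess(F)
@assert F.L * F.U ≈ F.P * Array(T)   # P*A == L*U, as the moved test verifies
```
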
48 changes: 26 additions & 22 deletions test/linalg/matmul.jl → stdlib/LinearAlgebra/test/matmul.jl
@@ -1,8 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestMatmul

using Base.LinAlg: mul!
using Test, LinearAlgebra, Random
using LinearAlgebra: mul!

## Test Julia fallbacks to BLAS routines

@@ -102,11 +103,11 @@ end
@test mul!(C, transpose(A), B) == A'*B
@test mul!(C, A, transpose(B)) == A*B'
@test mul!(C, transpose(A), transpose(B)) == A'*B'
@test Base.LinAlg.mul!(C, adjoint(A), transpose(B)) == A'*transpose(B)
@test LinearAlgebra.mul!(C, adjoint(A), transpose(B)) == A'*transpose(B)

#test DimensionMismatch for generic_matmatmul
@test_throws DimensionMismatch Base.LinAlg.mul!(C, adjoint(A), transpose(fill(1,4,4)))
@test_throws DimensionMismatch Base.LinAlg.mul!(C, adjoint(fill(1,4,4)), transpose(B))
@test_throws DimensionMismatch LinearAlgebra.mul!(C, adjoint(A), transpose(fill(1,4,4)))
@test_throws DimensionMismatch LinearAlgebra.mul!(C, adjoint(fill(1,4,4)), transpose(B))
end
vv = [1,2]
CC = Matrix{Int}(uninitialized, 2, 2)
@@ -119,8 +120,8 @@ end
AA = rand(5,5)
BB = rand(5)
for A in (copy(AA), view(AA, 1:5, 1:5)), B in (copy(BB), view(BB, 1:5))
@test_throws DimensionMismatch Base.LinAlg.generic_matvecmul!(zeros(6),'N',A,B)
@test_throws DimensionMismatch Base.LinAlg.generic_matvecmul!(B,'N',A,zeros(6))
@test_throws DimensionMismatch LinearAlgebra.generic_matvecmul!(zeros(6),'N',A,B)
@test_throws DimensionMismatch LinearAlgebra.generic_matvecmul!(B,'N',A,zeros(6))
end
vv = [1,2,3]
CC = Matrix{Int}(uninitialized, 3, 3)
@@ -139,9 +140,9 @@ end
BB = rand(Float64,6,6)
CC = zeros(Float64,6,6)
for A in (copy(AA), view(AA, 1:6, 1:6)), B in (copy(BB), view(BB, 1:6, 1:6)), C in (copy(CC), view(CC, 1:6, 1:6))
@test Base.LinAlg.mul!(C, transpose(A), transpose(B)) == transpose(A)*transpose(B)
@test Base.LinAlg.mul!(C, A, adjoint(B)) == A*transpose(B)
@test Base.LinAlg.mul!(C, adjoint(A), B) == transpose(A)*B
@test LinearAlgebra.mul!(C, transpose(A), transpose(B)) == transpose(A)*transpose(B)
@test LinearAlgebra.mul!(C, A, adjoint(B)) == A*transpose(B)
@test LinearAlgebra.mul!(C, adjoint(A), B) == transpose(A)*B
end
end

@@ -194,8 +195,8 @@ end
@test *(adjoint(Asub), Asub) == *(adjoint(Aref), Aref)

A5x5, A6x5 = Matrix{Float64}.(uninitialized, ((5, 5), (6, 5)))
@test_throws DimensionMismatch Base.LinAlg.syrk_wrapper!(A5x5,'N',A6x5)
@test_throws DimensionMismatch Base.LinAlg.herk_wrapper!(A5x5,'N',A6x5)
@test_throws DimensionMismatch LinearAlgebra.syrk_wrapper!(A5x5,'N',A6x5)
@test_throws DimensionMismatch LinearAlgebra.herk_wrapper!(A5x5,'N',A6x5)
end

@testset "matmul for types w/o sizeof (issue #1282)" begin
@@ -261,24 +262,24 @@ end
@test A*b == Vector{Float64}[[2,2,1], [2,2]]
end

@test_throws ArgumentError Base.LinAlg.copytri!(Matrix{Float64}(uninitialized,10,10),'Z')
@test_throws ArgumentError LinearAlgebra.copytri!(Matrix{Float64}(uninitialized,10,10),'Z')

@testset "gemv! and gemm_wrapper for $elty" for elty in [Float32,Float64,ComplexF64,ComplexF32]
A10x10, x10, x11 = Array{elty}.(uninitialized, ((10,10), 10, 11))
@test_throws DimensionMismatch Base.LinAlg.gemv!(x10,'N',A10x10,x11)
@test_throws DimensionMismatch Base.LinAlg.gemv!(x11,'N',A10x10,x10)
@test Base.LinAlg.gemv!(elty[], 'N', Matrix{elty}(uninitialized,0,0), elty[]) == elty[]
@test Base.LinAlg.gemv!(x10, 'N', Matrix{elty}(uninitialized,10,0), elty[]) == zeros(elty,10)
@test_throws DimensionMismatch LinearAlgebra.gemv!(x10,'N',A10x10,x11)
@test_throws DimensionMismatch LinearAlgebra.gemv!(x11,'N',A10x10,x10)
@test LinearAlgebra.gemv!(elty[], 'N', Matrix{elty}(uninitialized,0,0), elty[]) == elty[]
@test LinearAlgebra.gemv!(x10, 'N', Matrix{elty}(uninitialized,10,0), elty[]) == zeros(elty,10)

I0x0 = Matrix{elty}(I, 0, 0)
I10x10 = Matrix{elty}(I, 10, 10)
I10x11 = Matrix{elty}(I, 10, 11)
@test Base.LinAlg.gemm_wrapper('N','N', I10x10, I10x10) == I10x10
@test_throws DimensionMismatch Base.LinAlg.gemm_wrapper!(I10x10,'N','N', I10x11, I10x10)
@test_throws DimensionMismatch Base.LinAlg.gemm_wrapper!(I10x10,'N','N', I0x0, I0x0)
@test LinearAlgebra.gemm_wrapper('N','N', I10x10, I10x10) == I10x10
@test_throws DimensionMismatch LinearAlgebra.gemm_wrapper!(I10x10,'N','N', I10x11, I10x10)
@test_throws DimensionMismatch LinearAlgebra.gemm_wrapper!(I10x10,'N','N', I0x0, I0x0)

A = rand(elty,3,3)
@test Base.LinAlg.matmul3x3('T','N',A, Matrix{elty}(I, 3, 3)) == transpose(A)
@test LinearAlgebra.matmul3x3('T','N',A, Matrix{elty}(I, 3, 3)) == transpose(A)
end

@testset "#13593, #13488" begin
@@ -295,7 +296,8 @@ end
struct RootInt
i::Int
end
import Base: *, adjoint, transpose, Adjoint, Transpose
import Base: *, adjoint, transpose
import LinearAlgebra: Adjoint, Transpose
(*)(x::RootInt, y::RootInt) = x.i*y.i
adjoint(x::RootInt) = x
transpose(x::RootInt) = x
@@ -399,3 +401,5 @@ module TestPR18218
@test typeof(d) == Vector{TypeC}
@test d == TypeC[5, 11]
end

end # module TestMatmul
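
In-place multiplication now comes from `LinearAlgebra: mul!` rather than `Base.LinAlg.mul!`. A minimal sketch of the destination-buffer pattern used in the matmul tests (sizes are arbitrary; `uninitialized` is the pre-`undef` spelling this commit still uses):

```julia
using LinearAlgebra
using LinearAlgebra: mul!

A = rand(3, 3)
B = rand(3, 3)
C = Matrix{Float64}(uninitialized, 3, 3)

mul!(C, transpose(A), B)            # write transpose(A)*B into C, no temporary
@assert C ≈ transpose(A) * B
```
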
10 changes: 5 additions & 5 deletions test/linalg/pinv.jl → stdlib/LinearAlgebra/test/pinv.jl
@@ -1,10 +1,8 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

#
# Test the pseudo-inverse
#
module TestPinv

using Test, Random
using Test, LinearAlgebra, Random

srand(12345)

@@ -160,7 +158,7 @@ end
@test a.diag[2] ≈ 0.0
end

if eltya <: Base.LinAlg.BlasReal
if eltya <: LinearAlgebra.BlasReal
@testset "sub-normal numbers/vectors/matrices" begin
a = pinv(realmin(eltya)/100)
@test a ≈ 0.0
@@ -185,3 +183,5 @@ end
end
end
end

end # module TestPinv
17 changes: 10 additions & 7 deletions test/linalg/qr.jl → stdlib/LinearAlgebra/test/qr.jl
@@ -1,8 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestQR

using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted, mul!
using Test, LinearAlgebra, Random
using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted, mul!

n = 10

@@ -20,8 +21,8 @@ breal = randn(n,2)/2
bimg = randn(n,2)/2

# helper functions to unambiguously recover explicit forms of an implicit QR Q
squareQ(Q::LinAlg.AbstractQ) = (sq = size(Q.factors, 1); mul!(Q, Matrix{eltype(Q)}(I, sq, sq)))
rectangularQ(Q::LinAlg.AbstractQ) = convert(Array, Q)
squareQ(Q::LinearAlgebra.AbstractQ) = (sq = size(Q.factors, 1); mul!(Q, Matrix{eltype(Q)}(I, sq, sq)))
rectangularQ(Q::LinearAlgebra.AbstractQ) = convert(Array, Q)

@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int)
raw_a = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal)
@@ -140,8 +141,8 @@ rectangularQ(Q::LinAlg.AbstractQ) = convert(Array, Q)
@test mul!(squareQ(q), adjoint(q)) ≈ Matrix(I, n, n)
@test_throws DimensionMismatch mul!(Matrix{eltya}(I, n+1, n+1), adjoint(q))
@test_throws BoundsError size(q,-1)
@test_throws DimensionMismatch Base.LinAlg.mul!(q,zeros(eltya,n1+1))
@test_throws DimensionMismatch Base.LinAlg.mul!(adjoint(q), zeros(eltya,n1+1))
@test_throws DimensionMismatch LinearAlgebra.mul!(q,zeros(eltya,n1+1))
@test_throws DimensionMismatch LinearAlgebra.mul!(adjoint(q), zeros(eltya,n1+1))

qra = qrfact(a[:,1:n1], Val(false))
q, r = qra.Q, qra.R
@@ -185,7 +186,7 @@ end

@testset "QR on Ints" begin
@test qr(Int[]) == (Int[],1)
@test Base.LinAlg.qr!(Int[1]) == (Int[1],1)
@test LinearAlgebra.qr!(Int[1]) == (Int[1],1)

B = rand(7,2)
@test (1:7)\B ≈ Vector(1:7)\B
@@ -211,3 +212,5 @@ end
A = rand(1//1:5//5, 4,3)
@test first(qr(A)) == first(qr(float(A)))
end

end # module TestQR
24 changes: 24 additions & 0 deletions stdlib/LinearAlgebra/test/runtests.jl
@@ -0,0 +1,24 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

include("triangular.jl")
include("qr.jl")
include("dense.jl")
include("matmul.jl")
include("schur.jl")
include("special.jl")
include("eigen.jl")
include("bunchkaufman.jl")
include("svd.jl")
include("lapack.jl")
include("tridiag.jl")
include("bidiag.jl")
include("diagonal.jl")
include("cholesky.jl")
include("lu.jl")
include("symmetric.jl")
include("generic.jl")
include("uniformscaling.jl")
include("lq.jl")
include("hessenberg.jl")
include("blas.jl")
include("adjtrans.jl")
13 changes: 8 additions & 5 deletions test/linalg/schur.jl → stdlib/LinearAlgebra/test/schur.jl
@@ -1,8 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestSchur

using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted
using Test, LinearAlgebra, Random
using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted

n = 10

@@ -59,7 +60,7 @@ aimg = randn(n,n)/2
sum(select) != 0 && @test S.values[findall(select)] ≈ O.values[1:sum(select)]
@test O.vectors*O.Schur*O.vectors' ≈ ordschura
@test_throws ErrorException f.A
Snew = Base.LinAlg.Schur(S.T, S.Z, S.values)
Snew = LinearAlgebra.Schur(S.T, S.Z, S.values)
SchurNew = ordschur!(copy(Snew), select)
@test O.vectors ≈ SchurNew.vectors
@test O.Schur ≈ SchurNew.Schur
@@ -92,7 +93,7 @@ aimg = randn(n,n)/2
# Make sure that we have sorted it correctly
@test NS.values[findall(select)] ≈ S.values[1:m]

Snew = Base.LinAlg.GeneralizedSchur(NS.S, NS.T, NS.alpha, NS.beta, NS.Q, NS.Z)
Snew = LinearAlgebra.GeneralizedSchur(NS.S, NS.T, NS.alpha, NS.beta, NS.Q, NS.Z)
SchurNew = ordschur!(copy(Snew), select)
@test S.Q ≈ SchurNew.Q
@test S.S ≈ SchurNew.S
@@ -108,9 +109,11 @@ aimg = randn(n,n)/2
end
end
@testset "0x0 matrix" for A in (zeros(eltya, 0, 0), view(rand(eltya, 2, 2), 1:0, 1:0))
T, Z, λ = Base.LinAlg.schur(A)
T, Z, λ = LinearAlgebra.schur(A)
@test T == A
@test Z == A
@test λ == zeros(0)
end
end

end # module TestSchur
16 changes: 9 additions & 7 deletions test/linalg/special.jl → stdlib/LinearAlgebra/test/special.jl
@@ -1,9 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
using SparseArrays
module TestSpecial

using Base.LinAlg: mul!
using Test, LinearAlgebra, SparseArrays, Random
using LinearAlgebra: mul!

n= 10 #Size of matrix to test
srand(1)
@@ -100,7 +100,7 @@ end
end

C = rand(n,n)
for TriType in [Base.LinAlg.UnitLowerTriangular, Base.LinAlg.UnitUpperTriangular, UpperTriangular, LowerTriangular]
for TriType in [LinearAlgebra.UnitLowerTriangular, LinearAlgebra.UnitUpperTriangular, UpperTriangular, LowerTriangular]
D = TriType(C)
for Spectype in [Diagonal, Bidiagonal, Tridiagonal, Matrix]
@test Matrix(D + convert(Spectype,A)) ≈ Matrix(D + A)
@@ -112,7 +112,7 @@ end
end

@testset "Triangular Types and QR" begin
for typ in [UpperTriangular,LowerTriangular,Base.LinAlg.UnitUpperTriangular,Base.LinAlg.UnitLowerTriangular]
for typ in [UpperTriangular,LowerTriangular,LinearAlgebra.UnitUpperTriangular,LinearAlgebra.UnitLowerTriangular]
a = rand(n,n)
atri = typ(a)
b = rand(n,n)
@@ -174,8 +174,8 @@ end
N = 4
# The tested annotation types
testfull = Bool(parse(Int,(get(ENV, "JULIA_TESTFULL", "0"))))
utriannotations = (UpperTriangular, Base.LinAlg.UnitUpperTriangular)
ltriannotations = (LowerTriangular, Base.LinAlg.UnitLowerTriangular)
utriannotations = (UpperTriangular, LinearAlgebra.UnitUpperTriangular)
ltriannotations = (LowerTriangular, LinearAlgebra.UnitLowerTriangular)
triannotations = (utriannotations..., ltriannotations...)
symannotations = (Symmetric, Hermitian)
annotations = testfull ? (triannotations..., symannotations...) : (LowerTriangular, Symmetric)
@@ -251,3 +251,5 @@ end
@testset "vcat of Vectors with SparseVectors should yield SparseVector (#22225)" begin
@test isa((@inferred vcat(Float64[], spzeros(1))), SparseVector)
end

end # module TestSpecial
9 changes: 6 additions & 3 deletions test/linalg/svd.jl → stdlib/LinearAlgebra/test/svd.jl
@@ -1,8 +1,9 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

using Test, Random
module TestSVD

using Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted
using Test, LinearAlgebra, Random
using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted

@testset "Simple svdvals / svdfact tests" begin
≊(x,y) = isapprox(x,y,rtol=1e-15)
@@ -97,7 +98,7 @@ a2img = randn(n,n)/2
@test gsvd.V*gsvd.D2*gsvd.R*gsvd.Q' ≈ c
end
end
if eltya <: Base.LinAlg.BlasReal
if eltya <: LinearAlgebra.BlasReal
@testset "Number input" begin
x, y = randn(eltya, 2)
@test svdfact(x) == svdfact(fill(x, 1, 1))
@@ -122,3 +123,5 @@ a2img = randn(n,n)/2
end
end
end

end # module TestSVD