From 710b8180c0c44cbf0479005474a1cca7d4cf0510 Mon Sep 17 00:00:00 2001 From: Christian Peel Date: Sat, 13 Apr 2019 12:24:28 -0700 Subject: [PATCH] Brun, docs, license * added Documenter.jl docs * added Brun lattice reduction. The comment on commit b767593 saying Brun was added then was incorrect * Added the sizereduction function * added functions returning lattice metrics for a matrix: orthogonalityFactor, hermiteFactor, seysenCond * added isLLLreduced, issizereduced * updated license to match standard MIT form --- LICENSE.md | 43 +++++------ Project.toml | 2 +- README.md | 104 +++++++++++++------- docs/make.jl | 9 +++ docs/src/functions.md | 25 +++++++ docs/src/index.md | 160 ++++++++++++++++++++++++++++++++++++++++ src/LLLplus.jl | 13 +++- src/applications.jl | 37 ++++++---- src/brun.jl | 73 ++++++++++++++++++ src/cvp.jl | 5 ++ src/hard_sphere.jl | 86 +++++++++++++++------- src/lll.jl | 66 +++++++++++------ src/seysen.jl | 8 +- src/utilities.jl | 167 ++++++++++++++++++++++++++++++++++++++++++ src/vblast.jl | 41 +++++++---- test/runtests.jl | 7 +- 16 files changed, 690 insertions(+), 156 deletions(-) create mode 100644 docs/make.jl create mode 100644 docs/src/functions.md create mode 100644 docs/src/index.md create mode 100644 src/brun.jl create mode 100644 src/utilities.jl diff --git a/LICENSE.md b/LICENSE.md index 256c561..3f23ce2 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,24 +1,21 @@ -The LLLplus.jl package is licensed under the MIT License. The package -consists of the contents of the /src and other files in this -repository. 
+MIT License -> Copyright (c) 2019: Christian Peel -> -> Permission is hereby granted, free of charge, to any person obtaining -> a copy of this software and associated documentation files (the -> "Software"), to deal in the Software without restriction, including -> without limitation the rights to use, copy, modify, merge, publish, -> distribute, sublicense, and/or sell copies of the Software, and to -> permit persons to whom the Software is furnished to do so, subject to -> the following conditions: -> -> The above copyright notice and this permission notice shall be -> included in all copies or substantial portions of the Software. -> -> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -> IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -> CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -> TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -> SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Copyright (c) 2019: Christian Peel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Project.toml b/Project.toml index 014e460..b173c17 100644 --- a/Project.toml +++ b/Project.toml @@ -2,7 +2,7 @@ name = "LLLplus" uuid = "142c1900-a1c3-58ae-a66d-b187f9ca6423" keywords = ["lattice reduction", "LLL", "Seysen", "closest vector problem"] authors = ["Christian Peel "] -version = "1.1.0" +version = "1.2.0" [deps] BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" diff --git a/README.md b/README.md index fc4dff3..88a0e91 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,14 @@ # LLLplus.jl [![Build Status](https://travis-ci.org/christianpeel/LLLplus.jl.svg?branch=master)](https://travis-ci.org/christianpeel/LLLplus.jl) - -Lattice reduction and related lattice tools are used in +Lattice reduction and related lattice tools are used in cryptography, digital communication, and integer programming. LLLplus -includes Lenstra-Lenstra-Lovacsz (LLL) lattice reduction, Seysen -lattice reduction, VBLAST matrix decomposition, and a closest vector -problem (CVP) solver. +includes Lenstra-Lenstra-Lovacsz (LLL), Brun, +and Seysen lattice reduction; VBLAST matrix decomposition; and a closest vector +problem (CVP) solver. The historical and practical prominence of the +LLL technique in lattice tools is the reason for its use in the +name "LLLplus". [LLL](https://en.wikipedia.org/wiki/Lenstra%E2%80%93Lenstra%E2%80%93Lov%C3%A1sz_lattice_basis_reduction_algorithm) [1] lattice reduction is a powerful tool that is widely used in @@ -17,24 +16,26 @@ cryptanalysis, in cryptographic system design, in digital communications, and to solve other integer problems. 
LLL reduction is often used as an approximate solution to the [shortest vector problem](https://en.wikipedia.org/wiki/Lattice_problem#Shortest_vector_problem_.28SVP.29) -(SVP). We also include Gauss/Lagrange 2-dimensional lattice reduction -as a learning tool, and Seysen [2] lattice reduction which -simultaneously reduces a basis `B` and its dual `inv(B)'`. The LLL and -Seysen algorithms are based on [3]. The +(SVP). We also include Gauss/Lagrange, Brun [2] and Seysen [3] +lattice reduction techniques. The LLL, Brun, and +Seysen algorithms are based on [4]. The [CVP](https://en.wikipedia.org/wiki/Lattice_problem#Closest_vector_problem_.28CVP.29) -solver is based on [4] and can handle lattices and bounded integer +solver is based on [5] and can handle lattices and bounded integer constellations. - - - We also include code to do a [Vertical-Bell Laboratories Layered Space-Time](https://en.wikipedia.org/wiki/Bell_Laboratories_Layered_Space-Time) -(V-BLAST) [5] matrix decomposition which is used in digital -communications. The LLL, -Seysen, V-BLAST, and CVP functions are used to solve (exactly or -approximately) CVP problems in encoding and decoding multi-terminal -signals. +(V-BLAST) [6] matrix decomposition which is used in digital +communications. The LLL, Brun, Seysen, V-BLAST, and CVP functions are +used to solve (exactly or approximately) CVP problems in encoding and +decoding multi-terminal signals. + +Another important application is in cryptanalysis: attacking the +security of security systems. As an example of the sort of tools that +are used in cryptanalysis, see the `subsetsum` +function. Another important application is in integer programming, +where the LLL algorithm has been shown to solve the integer +programming feasibility problem; see `integerfeasibility`. ### Examples @@ -45,7 +46,7 @@ package on random lattices. 
Pkg.add("LLLplus") using LLLplus -# Time LLL, VBLAST decomposition of a complex matrix with randn entries +# Time LLL, VBLAST decomposition of a complex matrix with randn entries N = 1000; H = randn(N,N) + im*randn(N,N); println("Testing LLL on $(N)x$(N) complex matrix...") @@ -66,7 +67,7 @@ println("Testing Seysen on same $(N)x$(N) matrix...") ### Execution Time results -On this page we give a few performance results obtained from the +The following performance results are obtained from the following command in the top-level LLLplus directory: `julia -e 'include("benchmark/perftest.jl")'` In the tests we time execution of the lattice-reduction functions, @@ -83,10 +84,10 @@ emulate unit-variance Gaussian-distributed values. ![Time vs matrix size](benchmark/perfVsNfloat64.png) -Though the focus of the package is on floating-point, +Though the focus of the package is on floating-point, all the modules can handle a variety of data types. In the next figure we show execution time for several datatypes (Int32, Int64, -Int128, Float32, Float64, DoublFloat, BitInt, and BigFloat) which are used to +Int128, Float32, Float64, DoubleFloat, BitInt, and BigFloat) which are used to generate 40 128x128 matrices, over which execution time for the lattice reduction techniques is averaged. The vertical axis is a logarithmic representation of execution time as in the previous @@ -94,56 +95,59 @@ figure. ![Time vs data type](benchmark/perfVsDataType.png) -The algorithm pseudocode in the monograph [6] and the survey paper [3] +The algorithm pseudocode in the monograph [7] and the survey paper [4] were very helpful in writing the lattice reduction tools in LLLplus -and are a good resource for further study. - -LLLplus does not pretend to be a tool for number-theory work. For -that, see the [Nemo.jl](https://github.com/wbhart/Nemo.jl) package -which uses the [FLINT](http://flintlib.org/) C library to do LLL +and are a good resource for further study. 
If you are trying to break +one of the [Lattice Challenge](http://www.latticechallenge.org) +records or are looking for robust, well-proven lattice tools, look at +[fplll](https://github.com/fplll/fplll). Also, for many +number-theoretic problems the +[Nemo.jl](https://github.com/wbhart/Nemo.jl) package is appropriate; +it uses the [FLINT](http://flintlib.org/) C library to do LLL reduction on Nemo-specific data types. +Finally, LLLplus should have version number of about 0.2.0 rather +than 1.2.0; please treat the package as experimental. ### Future Possible improvements include: -* Add Block-Korkin-Zolotarev lattice redution, with improvements - as in [7], code for Babai's simple CVP approximation [8], and - Brun's integer relation decomposition. +* Add Block-Korkin-Zolotarev lattice reduction, with improvements + as in [8], code for Babai's CVP approximation [9], and explicit CVP + approximations using LLL, Brun, Seysen, and VBLAST. * The [SVP](http://www.latticechallenge.org/svp-challenge/) Challenge and the [Ideal](http://www.latticechallenge.org/ideallattice-challenge/) - Lattice challenge have code to generate lattices for the respective - contests which could be used or duplicated to make challenging - performance tests. The main - [Lattice](http://www.latticechallenge.org/) Challenge also lists - references which could be used to replicate tests. + Lattice challenge have code to generate lattices + which could be used to make challenging performance tests. * Compare with the [fplll](https://github.com/fplll/fplll) library, the [Number Theory Library](http://www.shoup.net/ntl/), and NEMO/FLINT. - - - - + + ### References -[1] A. K. Lenstra; H. W. Lenstra Jr.; L. Lovász, ["Factoring polynomials with rational coefficients"](http://ftp.cs.elte.hu/~lovasz/scans/lll.pdf). Mathematische Annalen 261, 1982 +[1] A. K. Lenstra; H. W. Lenstra Jr.; L. Lovász, ["Factoring polynomials with rational coefficients"](http://ftp.cs.elte.hu/~lovasz/scans/lll.pdf). 
Mathematische Annalen 261, 1982. + +[2] V. Brun, +["En generalisation av kjedebrøken I,"](https://archive.org/stream/skrifterutgitavv201chri#page/300/mode/2up) +Skr. Vidensk. Selsk. Kristiana, Mat. Nat. Klasse, 1919. -[2] M. Seysen, ["Simultaneous reduction of a lattice basis and its reciprocal basis"](http://link.springer.com/article/10.1007%2FBF01202355) Combinatorica, 1993. +[3] M. Seysen, ["Simultaneous reduction of a lattice basis and its reciprocal basis"](http://link.springer.com/article/10.1007%2FBF01202355) Combinatorica, 1993. -[3] D. Wuebben, D. Seethaler, J. Jalden, and G. Matz, ["Lattice Reduction - A Survey with Applications in Wireless Communications"](http://www.ant.uni-bremen.de/sixcms/media.php/102/10740/SPM_2011_Wuebben.pdf). IEEE Signal Processing Magazine, 2011. +[4] D. Wuebben, D. Seethaler, J. Jalden, and G. Matz, ["Lattice Reduction - A Survey with Applications in Wireless Communications"](http://www.ant.uni-bremen.de/sixcms/media.php/102/10740/SPM_2011_Wuebben.pdf). IEEE Signal Processing Magazine, 2011. -[4] A. Ghasemmehdi, E. Agrell, ["Faster Recursions in Sphere Decoding"](https://publications.lib.chalmers.se/records/fulltext/local_141586.pdf) IEEE +[5] A. Ghasemmehdi, E. Agrell, ["Faster Recursions in Sphere Decoding"](https://publications.lib.chalmers.se/records/fulltext/local_141586.pdf) IEEE Transactions on Information Theory, vol 57, issue 6 , June 2011. -[5] P. W. Wolniansky, G. J. Foschini, G. D. Golden, R. A. Valenzuela, ["V-BLAST: An Architecture for Realizing Very High Data Rates Over the Rich-Scattering Wireless Channel"](http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=738086). Proc. URSI +[6] P. W. Wolniansky, G. J. Foschini, G. D. Golden, R. A. Valenzuela, ["V-BLAST: An Architecture for Realizing Very High Data Rates Over the Rich-Scattering Wireless Channel"](http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=738086). Proc. URSI ISSSE: 295–300, 1998. -[6] M. R. 
Bremner, ["Lattice Basis Reduction: An Introduction to the LLL +[7] M. R. Bremner, ["Lattice Basis Reduction: An Introduction to the LLL Algorithm and Its Applications"](https://www.amazon.com/Lattice-Basis-Reduction-Introduction-Applications/dp/1439807027) CRC Press, 2012. -[7] Y. Chen, P. Q. Nguyen, ["BKZ 2.0: Better Lattice Security Estimates"](http://www.iacr.org/archive/asiacrypt2011/70730001/70730001.pdf). Proc. ASIACRYPT 2011. +[8] Y. Chen, P. Q. Nguyen, ["BKZ 2.0: Better Lattice Security Estimates"](http://www.iacr.org/archive/asiacrypt2011/70730001/70730001.pdf). Proc. ASIACRYPT 2011. -[8] L. Babai, ["On Lovász’ lattice reduction and the nearest lattice point problem"](https://link.springer.com/article/10.1007/BF02579403), +[9] L. Babai, ["On Lovász’ lattice reduction and the nearest lattice point problem"](https://link.springer.com/article/10.1007/BF02579403), Combinatorica, 1986. diff --git a/docs/make.jl b/docs/make.jl new file mode 100644 index 0000000..7910c4f --- /dev/null +++ b/docs/make.jl @@ -0,0 +1,9 @@ +using LLLplus, Documenter, MacroTools + +makedocs( + sitename="LLLplus.jl", + pages = [ + "Home" => "index.md", + "Functions" => "functions.md"], + format = Documenter.HTML(prettyurls = haskey(ENV, "CI"))) + diff --git a/docs/src/functions.md b/docs/src/functions.md new file mode 100644 index 0000000..91ac169 --- /dev/null +++ b/docs/src/functions.md @@ -0,0 +1,25 @@ +# Functions + + +```@meta +DocTestSetup = quote + using LLLplus + using LinearAlgebra +end +``` +```@docs + lll + cvp + svp + gauss + seysen + vblast + subsetsum + integerfeasibility + hard_sphere + issizereduced + islllreduced + orthogonalitydefect + hermitefactor + seysencond +``` diff --git a/docs/src/index.md b/docs/src/index.md new file mode 100644 index 0000000..249a999 --- /dev/null +++ b/docs/src/index.md @@ -0,0 +1,160 @@ +## LLLplus + +```@meta +CurrentModule = LLLplus +``` + +Lattice reduction and related lattice tools are used in +cryptography, digital communication, and 
integer programming. LLLplus +includes Lenstra-Lenstra-Lovacsz (LLL), Brun, +and Seysen lattice reduction; VBLAST matrix decomposition; and a closest vector +problem (CVP) solver. The historical and practical prominence of the +LLL technique in lattice tools is the reason for its use in the +name "LLLplus". + +[LLL](https://en.wikipedia.org/wiki/Lenstra%E2%80%93Lenstra%E2%80%93Lov%C3%A1sz_lattice_basis_reduction_algorithm) +[1] lattice reduction is a powerful tool that is widely used in +cryptanalysis, in cryptographic system design, in digital +communications, and to solve other integer problems. LLL reduction is +often used as an approximate solution to the +[shortest vector problem](https://en.wikipedia.org/wiki/Lattice_problem#Shortest_vector_problem_.28SVP.29) +(SVP). We also include Gauss/Lagrange, Brun [2] and Seysen [3] +lattice reduction techniques. The LLL, Brun, and +Seysen algorithms are based on [4]. The +[CVP](https://en.wikipedia.org/wiki/Lattice_problem#Closest_vector_problem_.28CVP.29) +solver is based on [5] and can handle lattices and bounded integer +constellations. + +We also include code to do a +[Vertical-Bell Laboratories Layered Space-Time](https://en.wikipedia.org/wiki/Bell_Laboratories_Layered_Space-Time) +(V-BLAST) [6] matrix decomposition which is used in digital +communications. The LLL, Brun, Seysen, V-BLAST, and CVP functions are +used to solve (exactly or approximately) CVP problems in encoding and +decoding multi-terminal signals. + +Another important application is in cryptanalysis: attacking the +security of security systems. As an example of the sort of tools that +are used in cryptanalysis, see the `subsetsum` +function. Another important application is in integer programming, +where the LLL algorithm has been shown to solve the integer +programming feasibility problem; see `integerfeasibility`. + +### Examples + +Here are a few examples of using the functions in the +package on random lattices. 
+ +```julia +Pkg.add("LLLplus") +using LLLplus + +# Time LLL, VBLAST decomposition of a complex matrix with randn entries +N = 1000; +H = randn(N,N) + im*randn(N,N); +println("Testing LLL on $(N)x$(N) complex matrix...") +@time B,T,Q,R = lll(H); +M = 200; +println("Testing VBLAST on $(M)x$(M) chunk of same matrix...") +@time W,P,B = vblast(H[1:M,1:M]); + +# Time LLL, Seysen decompositions of a 100x100 Int64 matrix with +# rand entries distributed uniformly between -100:100 +N = 100; +H = rand(-100:100,N,N); +println("Testing LLL on $(N)x$(N) real matrix...") +@time B,T,Q,R = lll(H); +println("Testing Seysen on same $(N)x$(N) matrix...") +@time B,T = seysen(H); +``` + +### Execution Time results + +The following performance results are obtained from the +following command in the top-level LLLplus directory: +`julia -e 'include("benchmark/perftest.jl")'` +In the tests we time execution of the lattice-reduction functions, +average the results over multiple random matrices, and show results as +a function of the size of the matrix and of the data type. + +We first show how the time varies with matrix size (1,2,4,...256); the +vertical axis shows execution time on a logarithmic scale; the x-axis +is also logarithmic. The generally linear nature of the LLL curve supports +the polynomial-time nature of the algorithm. Each data point +is the average of execution time of 40 runs of a lattice-reduction +technique, where the matrices used were generated using 'randn' to +emulate unit-variance Gaussian-distributed values. + +![Time vs matrix size](../../benchmark/perfVsNfloat64.png) + +Though the focus of the package is on floating-point, +all the modules can handle a variety of data types. In the next figure +we show execution time for several datatypes (Int32, Int64, +Int128, Float32, Float64, DoubleFloat, BitInt, and BigFloat) which are used to +generate 40 128x128 matrices, over which execution time for the lattice +reduction techniques is averaged. 
The vertical axis is a logarithmic +representation of execution time as in the previous +figure. + +![Time vs data type](../../benchmark/perfVsDataType.png) + +The algorithm pseudocode in the monograph [7] and the survey paper [4] +were very helpful in writing the lattice reduction tools in LLLplus +and are a good resource for further study. If you are trying to break +one of the [Lattice Challenge](http://www.latticechallenge.org) +records or are looking for robust, well-proven lattice tools, look at +[fplll](https://github.com/fplll/fplll). Also, for many +number-theoretic problems the +[Nemo.jl](https://github.com/wbhart/Nemo.jl) package is appropriate; +it uses the [FLINT](http://flintlib.org/) C library to do LLL +reduction on Nemo-specific data types. +Finally, LLLplus should have version number of about 0.2.0 rather +than 1.2.0; please treat the package as experimental. + +### Future + +Possible improvements include: +* Add Block-Korkin-Zolotarev lattice reduction, with improvements + as in [8], code for Babai's CVP approximation [9], and explicit CVP + approximations using LLL, Brun, Seysen, and VBLAST. +* The [SVP](http://www.latticechallenge.org/svp-challenge/) Challenge + and the + [Ideal](http://www.latticechallenge.org/ideallattice-challenge/) + Lattice challenge have code to generate lattices + which could be used to make challenging performance tests. +* Compare with the [fplll](https://github.com/fplll/fplll) library, + the [Number Theory Library](http://www.shoup.net/ntl/), and + NEMO/FLINT. + + + +### References + +[1] A. K. Lenstra; H. W. Lenstra Jr.; L. Lovász, ["Factoring polynomials with rational coefficients"](http://ftp.cs.elte.hu/~lovasz/scans/lll.pdf). Mathematische Annalen 261, 1982 + +[2] V. Brun, +["En generalisation av kjedebrøken I,"](https://archive.org/stream/skrifterutgitavv201chri#page/300/mode/2up) +Skr. Vidensk. Selsk. Kristiana, Mat. Nat. Klasse, 1919. + +[3] M. 
Seysen, ["Simultaneous reduction of a lattice basis and its reciprocal basis"](http://link.springer.com/article/10.1007%2FBF01202355) Combinatorica, 1993. + +[4] D. Wuebben, D. Seethaler, J. Jalden, and G. Matz, ["Lattice Reduction - A Survey with Applications in Wireless Communications"](http://www.ant.uni-bremen.de/sixcms/media.php/102/10740/SPM_2011_Wuebben.pdf). IEEE Signal Processing Magazine, 2011. + +[5] A. Ghasemmehdi, E. Agrell, ["Faster Recursions in Sphere Decoding"](https://publications.lib.chalmers.se/records/fulltext/local_141586.pdf) IEEE +Transactions on Information Theory, vol 57, issue 6 , June 2011. + +[6] P. W. Wolniansky, G. J. Foschini, G. D. Golden, R. A. Valenzuela, ["V-BLAST: An Architecture for Realizing Very High Data Rates Over the Rich-Scattering Wireless Channel"](http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=738086). Proc. URSI +ISSSE: 295–300, 1998. + +[7] M. R. Bremner, ["Lattice Basis Reduction: An Introduction to the LLL + Algorithm and Its Applications"](https://www.amazon.com/Lattice-Basis-Reduction-Introduction-Applications/dp/1439807027) CRC Press, 2012. + +[8] Y. Chen, P. Q. Nguyen, ["BKZ 2.0: Better Lattice Security Estimates"](http://www.iacr.org/archive/asiacrypt2011/70730001/70730001.pdf). Proc. ASIACRYPT 2011. + +[9] L. Babai, ["On Lovász’ lattice reduction and the nearest lattice point problem"](https://link.springer.com/article/10.1007/BF02579403), +Combinatorica, 1986. + + + +## List of Functions +```@index +``` diff --git a/src/LLLplus.jl b/src/LLLplus.jl index 6b648ec..24e398d 100644 --- a/src/LLLplus.jl +++ b/src/LLLplus.jl @@ -1,3 +1,10 @@ +""" +Main module for `LLLplus.jl` -- lattice reduction and related tools for Julia. + +As an example of the functions in the package, see [`lll`](@ref), which does +Lenstra–Lenstra–Lovász lattice reduction of a matrix. 
+ +""" module LLLplus using LinearAlgebra @@ -7,18 +14,22 @@ export lll, cvp, svp, + brun, gauss, seysen, vblast, subsetsum, integerfeasibility, - hard_sphere + hardsphere, hard_sphere, + issizereduced,islllreduced,orthogonalitydefect,hermitefactor,seysencond include("lll.jl") # lll, gauss include("cvp.jl") # cvp, svp +include("brun.jl") include("seysen.jl") include("vblast.jl") include("applications.jl") # subsetsum, integerFeasibility +include("utilities.jl") include("hard_sphere.jl") # may be deprecated in future diff --git a/src/applications.jl b/src/applications.jl index ccd93e4..300f1dd 100644 --- a/src/applications.jl +++ b/src/applications.jl @@ -7,9 +7,13 @@ system. This is the *integer programming feasibility problem*. If `nullVecs==true`, then as well as returning a solution `x`, also return a matrix `xNull` of vectors in the null space of `A` which could be added to the `x` vector to find a solution which satisfies a constraint such as `0 .≤ x -.≤ u`; see the paper below. +.≤ u`; see the paper below. -*Solving A System Of Diophantine Equations With Bounds On The Variables* by +This function is not intended to be a robust implementation of an integer +programming feasibility solution, rather its purpose is to give a flavor +of what problems the LLL technique can help with. + +"Solving A System Of Diophantine Equations With Bounds On The Variables" by Karen Aardal, Cor Hurkens, and Arjen Lenstra in Integer Programming and Combinatorial Optimization, 6th International IPCO Conference, vol 1412, pp 229-242, 1998. 
See http://softlib.rice.edu/pub/CRPC-TRs/reports/CRPC-TR98782.pdf @@ -17,6 +21,7 @@ Combinatorial Optimization, 6th International IPCO Conference, vol 1412, pp # Examples ```jldoctest julia> A=[10 1 -9; 1 8 8]; xtrue=[0; 2; 9]; d=A*xtrue; + julia> integerfeasibility(A,d) 3-element Array{Int64,1}: 0 @@ -24,7 +29,8 @@ julia> integerfeasibility(A,d) 9 julia> n=20;m=30; A = rand(-10:10,n,m); xtrue = rand(0:10,m); d=A*xtrue; -julia> sum(abs.(xtrue-integerfeasibility(A,d))) + +julia> sum(abs.(xtrue - integerfeasibility(A,d) )) 0 ``` @@ -62,7 +68,9 @@ solution. This follows the technique described by Lagarias and Odlyzko in "Solving Low-Density Subset Sum Problems" in Journal of ACM, Jan 1985. Code based on http://web.eecs.umich.edu/~cpeikert/lic15/lec05.pdf -The original technique came from Lagarias and Odlyzko. +The original technique came from Lagarias and Odlyzko. We can likely +get better results using techniques described and referenced in +https://www-almasty.lip6.fr/~joux/pages/papers/ToolBox.pdf It's odd that permuting the `a` vector in the second example given below causes the alg to often not find a solution. The example doesn't have the @@ -72,17 +80,20 @@ all. 
# Examples ```jldoctest julia> a=[1,2,4,9,20,38]; s=30; x=subsetsum(a,s); s-x'*a -0 +0.0 julia> a=[32771,65543,131101,262187,524387,1048759, # from Bremner p 117 2097523,4195057,8390143,16780259,33560539, 67121039,134242091,268484171,536968403]; + julia> s=891221976; x=subsetsum(a,s); s-x'*a -0 +0.0 julia> N=40;a=rand(1:2^BigInt(256),N);xtrue=rand(Bool,N); s=a'*xtrue; + julia> setprecision(BigFloat,300); x=subsetsum(a,s); s-x'*a -0 +0.0 + ``` """ function subsetsum(a::AbstractArray{Ti,1},s::Ti) where {Ti<:Integer} @@ -90,7 +101,7 @@ function subsetsum(a::AbstractArray{Ti,1},s::Ti) where {Ti<:Integer} n = length(a) - flag = false; + flag = false if s=1 - print("A non-binary solution was found\n") + print("A non-binary solution was found; check that it's correct\n") return Bp[:,ixMatch[1][2]] else print("A solution was not found\n") end end - return NaN + return missing.*Bp[:,1] end + diff --git a/src/brun.jl b/src/brun.jl new file mode 100644 index 0000000..7fb2444 --- /dev/null +++ b/src/brun.jl @@ -0,0 +1,73 @@ +""" + B, T = brun(H) + +Brun's integer-relations alg implemented as a matrix decomposition. Takes as +input the matrix `H` and returns a reduced basis `B` and `T`, a unimodular +transformation matrix such that `B = H*T`. Brun reduction is often done with +`pinv(H)` as input to yield `B = pinv(H)*T`. + +See V. Brun, "En generalisation av kjedebrøken I," Skr. Vid +ensk. Selsk. Kristiana, Mat. Nat. Klasse, 1919. See +https://archive.org/stream/skrifterutgitavv201chri#page/300/mode/2up + +Follows code from D. Wuebben, D. Seethaler, J. Jalden, and G. 
Matz, "Lattice +Reduction - A Survey with Applications in Wireless Communications" IEEE +Signal Processing Magazine, March 2011 + +# Examples +```jldoctest +julia> H=[1 2; 3 4]; B,T=brun(H); T +2×2 Array{Int64,2}: + 3 -1 + -2 1 + +``` +""" +function brun(H::AbstractArray{Td,2}) where Td + + # get size + K, M = size(H) + + if rank(H) < K + error("Input basis does not have full row rank") + end + + Ti= getIntType(Td) + + grammian = H'*H + metric_vec = sqrt.(abs.(diag(grammian'))) + mu = grammian[:,1] # initialize mu with an arbitrary column of the Grammian + T = Matrix{Ti}(I, K, K) # unimodular transformation matrix + B = copy(H) + + doBrun = true + while doBrun + + # Calculation of Brun's update value + zw,s = findmax(mu) + zw = copy(mu) + zw[s] = -typemax(zw[1]) + zw,t = findmax(zw) + r = round(mu[s]/mu[t]) + + # basis update + updated_vec = B[:,s] .- r'*B[:,t] + + # if metric can be improved + updated_norm = norm(updated_vec) + if updated_norm < metric_vec[s] + + # update mu, unimodular and reduced matrices, metric + mu[s] -= r*mu[t] + T[:,s] .-= r'*view(T,:,t) #T[:,s] -= r'*T[:,t] + B[:,s] .= updated_vec + metric_vec[s] = updated_norm + + else + doBrun = false + end + + end # + + return B, T +end diff --git a/src/cvp.jl b/src/cvp.jl index 6473c4c..2abda61 100644 --- a/src/cvp.jl +++ b/src/cvp.jl @@ -19,12 +19,16 @@ julia> H=[1 2; 3 4]; Q,R=qr(H); uhat = cvp(Q'*[0,2],R) -1.0 julia> n=100;H=randn(n,n);Q,R=qr(H); + julia> u=Int.(rand(0:1e10,n));y=H*u+rand(n)/100; + julia> uhat=cvp(Q'*y,R); sum(abs.(u-uhat)) 0.0 julia> n=500;H=randn(n,n);Q,R=qr(H); + julia> u=Int.(rand(-1:1,n));y=H*u+rand(n)/10; + julia> uhat=cvp(Q'*y,R,Val{false},1); sum(abs.(u-uhat)) 0.0 ``` @@ -165,3 +169,4 @@ function svp(B::AbstractArray{Td,2}) where Td idx = sortperm(c) return V[:,idx[1]] end + diff --git a/src/hard_sphere.jl b/src/hard_sphere.jl index 78289f6..bfc0960 100644 --- a/src/hard_sphere.jl +++ b/src/hard_sphere.jl @@ -1,21 +1,37 @@ -# @doc """ -#x=hard_sphere(y,H,Nc) -# Solve the 
problem argmin_x ||y-Hx||. The input vector y is of -# length N, with H of dimension N by M, and the returned vector x of -# length M. If Nc is even the elements of x are integers from -# [0:Nc-1]*2-(Nc-1); odd Nc is not yet supported. For Nc=2, we're -# searching the 2PAM constellation of [-1,1], and for nInt=4 we're -# searching [-3,-1,1,3]. -#X=hard_sphere(Y,H,Nc) -# The input may be a matrix Y of dimension N by Ns, in which case the -# problem is solved for each column of Y, with the solutions in the -# columns of X. -# +""" + x=hardsphere(y,H,Nc) + + Solve the problem argmin_x ||y-Hx||. The input vector y is of + length N, with H of dimension N by M, and the returned vector x of + length M. If Nc is even the elements of x are integers from + [0:Nc-1]*2-(Nc-1); odd Nc is not yet supported. For Nc=2, we're + searching the 2PAM constellation of [-1,1], and for nInt=4 we're + searching [-3,-1,1,3]. + + Digital communication researchers often call the Fincke-Pohst search + algorithm used in this function as the "sphere-decoder" because of its use + in decoding digital signals and for the use of a multi-dimensional sphere + to constrain search complexity. This function uses only hard-decision + decoding, hence the term "hardsphere". To complete the vocabulary lesson, + the problem that number theorists call the "closest vector problem" is + labeled "integer least squares" by many GPS researchers. + +X=hardsphere(Y,H,Nc) + + The input may be a matrix Y of dimension N by Ns, in which case the + problem is solved for each column of Y, with the solutions in the + columns of X. + # Examples: -# X = hard_sphere([1; 2], [1 2; 3 4],2) -# X = hard_sphere(rand(0:20,2,15), [1 2; 3 4],2) -# """ -> -function hard_sphere(Y::AbstractArray{Td,2},H::AbstractArray{Td,2},Nc::Integer) where {Td} +```jldoctest +julia> X = hardsphere([1. 1]', [1. 
2; 3 4],2) +2×1 Array{Int64,2}: + -1 + 1 + +``` +""" +function hardsphere(Y::AbstractArray{Td,2},H::AbstractArray{Td,2},Nc::Integer) where {Td} (N,M) = size(H); Qc = ones(M)*Nc; @@ -34,21 +50,39 @@ function hard_sphere(Y::AbstractArray{Td,2},H::AbstractArray{Td,2},Nc::Integer) Xh = zeros(Int,M,Ns); for ns = 1:Ns yp = (Q'*Y[:,ns] + R*ones(M,1).*xoffset)/xmult; - xp = algII_smart(yp,R,Qc); + xp = algIIsmart(yp,R,Qc); Xh[:,ns] = xp*xmult - xoffset; end return Xh end -#########################################################################% -function algII_smart(yp,R,Qc) - # xh = algII_smart(yp,R,Qc) - # - # Find the closest xh for yp=R*xh + w, where w is some unknown noise and - # x(i) is from the range [0,...,Qc(i)-1]. This implements the "Alg II-smart" - # hard-decision sphere decoder as in Damen, El Gamal, and Caire, Trans It - # 03. +""" + hard_sphere(...) = hardsphere(...) + +See hardsphere; in a future version hard_sphere will be removed. +""" +hard_sphere(x...) = hardsphere(x...) + +""" + xh = algIIsmart(yp,R,Qc) + +Find the closest xh for yp=R*xh + w, where w is some unknown noise and x(i) +is from the range [0,...,Qc(i)-1]. This implements the "Alg II-smart" +hard-decision sphere decoder as in Damen, El Gamal, and Caire, Trans It +03. This function is not exported from LLLplus at present and may disappear +in the future. + +# Examples: +```jldoctest +julia> X = algIIsmart([5. 8]', [1. 
2; 0 4],[4,4]) +2×1 Array{Float64,2}: + 1.0 + 2.0 + +``` +""" +function algIIsmart(yp,R,Qc) (N,M) = size(R); xh = zeros(M,1); # the best solution diff --git a/src/lll.jl b/src/lll.jl index 4192c3a..569dcb4 100644 --- a/src/lll.jl +++ b/src/lll.jl @@ -24,6 +24,7 @@ julia> H= BigFloat.([1.5 2; 3 4]) .+ 2im; B,_= lll(H); B 1.0+0.0im 0.0+0.0im julia> N=500;H = randn(N,N); B,T = lll(H); + ``` """ function lll(H::AbstractArray{Td,2},δ::Float64=3/4) where {Td<:Number} @@ -37,27 +38,10 @@ L = size(B,2); Qt,R = qr(B); Q = Matrix(Qt); # A few cycles can be saved by skipping updates of the Q matrix. -# of course these should be replaced with different methods... I'll do it -# someday -if Td<:BigInt || Td<:BigFloat - Ti = BigInt -elseif Td==Float32 || Td==Int32 - Ti=Int32 -elseif Td==Int16 - Ti=Int16 -elseif Td==Int8 - Ti=Int8 -else - Ti=Int -end -#Ti = (Td<:BigInt || Td<:BigFloat) ? BigInt : Int - -if Td<:Complex - T = Matrix{Complex{Ti}}(I, L, L) -else - T = Matrix{Ti}(I, L, L) -end - +Ti= getIntType(Td) +T = Matrix{Ti}(I, L, L) +zeroTi = real(zero(Ti)) + lx = 2; while lx <= L @@ -65,7 +49,7 @@ while lx <= L for k=lx-1:-1:1 rk = R[k,lx]/R[k,k] mu = roundf(rk) - if abs(mu)>zero(Ti) + if abs(mu)>zeroTi # B[:,lx] -= mu * B[:,k] # R[1:k,lx] -= mu * R[1:k,k] # T[:,lx] -= mu * T[:,k] @@ -107,7 +91,7 @@ end """ B = gauss(H) -Do Gauss (or Lagrange?!) reduction on the lattice defined by the two columns of +Do Gauss/Lagrange reduction on the lattice defined by the two columns of H. Follows Fig 2.3 of "Lattice Basis Reduction: An Introduction to the LLL @@ -119,6 +103,7 @@ julia> H = [1 2; 3 3]; B = gauss(H) 2×2 Array{Float64,2}: 1.0 0.0 0.0 3.0 + ``` """ function gauss(H::AbstractArray{Td,2}) where Td @@ -144,3 +129,38 @@ function gauss(H::AbstractArray{Td,2}) where Td end return [v1 v2] end + + +""" + getIntType(Td) + + Return an integer type that matches Td. I.e. BigInt for Td=BigFloat, Int64 + for Td=Float64. 
Should replace this with multiple functions; the + following gives a hint, but it's not correct: + +getIntType(Td::Complex{Tr}) = Complex{getIntType(Tr)} +getIntType(Td) where {Td<:Integer} = Td +getIntType(<:Float64) = Int64 +getIntType(<:Float32) = Int32 +getIntType(<:Float16) = Int16 +getIntType(<:BigFloat) = BigInt + +""" +function getIntType(Td) + Tr = real(Td) + if Tr<:BigInt || Tr<:BigFloat + Ti = BigInt + elseif Tr==Float32 || Tr==Int32 + Ti=Int32 + elseif Tr==Int16 + Ti=Int16 + elseif Tr==Int8 + Ti=Int8 + else + Ti=Int + end + if Td<:Complex + Ti = Complex{Ti} + end + return Ti +end diff --git a/src/seysen.jl b/src/seysen.jl index 07583a6..a54051a 100644 --- a/src/seysen.jl +++ b/src/seysen.jl @@ -4,7 +4,7 @@ Do greedy Seysen lattice reduction on the matrix `H`, returning `B`, the reduced lattice basis; `T` a unimodular matrix that reduces `H` (i.e. `B = H*T`); `B_dual`, dual lattice basis (i.e., `B_dual = pinv(B)`); and num_it the number -of iterations (basis updates). See also [`lll`](@ref), and [`brun`](@ref). +of iterations (basis updates). See also [`lll`](@ref). Follows Seysen algorithm in "Lattice Reduction - A Survey with Applications in Wireless Communications" by D. Wuebben, et al, IEEE Signal Processing @@ -12,10 +12,10 @@ Magazine, 2011. 
# Examples ```jldoctest -julia> H= [1 2; 3 4];B,_ = seysen(H); B +julia> H= [1 2; 3 4];B,T = seysen(H); B 2×2 Array{Int64,2}: - 1 -1 - 1 1 + -1 1 + 1 1 julia> H= BigFloat.([1.5 2; 3 4]) .+ 2im; B,_= seysen(H); B 2×2 Array{Complex{BigFloat},2}: diff --git a/src/utilities.jl b/src/utilities.jl new file mode 100644 index 0000000..a888086 --- /dev/null +++ b/src/utilities.jl @@ -0,0 +1,167 @@ +""" + orthogonalitydefect(B) + +Find the orthogonality defect of the matrix B defined, for example, +on page 2 of Bennet + +[Bennet](http://users.eecs.northwestern.edu/~hbennett/publications/bases.pdf) + +# Examples +```jldoctest +julia> H= [1 2; 3 4];B,T=lll(H); + +julia> [orthogonalitydefect(H) orthogonalitydefect(B)] +1×2 Array{Float64,2}: + 7.07107 1.0 + +``` +""" +function orthogonalitydefect(B) + N = size(B,2) + prod = one(B[1,1]) + for n=1:N + prod *= sqrt(B[:,n]'*B[:,n]) + end + return prod/abs(det(B)) +end + + + + +""" + hermitefactor(B) + +Find the Hermite factor of matrix B + +# Examples +```jldoctest +julia> H= [1 2; 3 4];hermitefactor(H) +1.5811388300841898 + +``` +""" +hermitefactor(B) = sqrt(B[:,1]'*B[:,1])/abs(det(B)) + + + + +""" + seysencond(B) + +Seysen condition number as on, for example, page 3 of Bennet + +[Bennet](http://users.eecs.northwestern.edu/~hbennett/publications/bases.pdf) + +# Examples +```jldoctest +julia> H= [1 2; 3 4];seysencond(H) +2.8284271247461903 + +``` +""" +function seysencond(B) + N = size(B,2) + Bi = inv(B)' + quotients = zeros(typeof(Bi[1,1]),N) + for n=1:N + quotients[n] = sqrt(B[:,n]'*B[:,n])/sqrt(Bi[:,n]'*Bi[:,n]) + end + return maximum(quotients) +end + + + +""" + islllreduced(B) + +Determine if the matrix B is LLL reduced or not. See p 56 of Bremner for a +definition. + +M. R. Bremner, "Lattice Basis Reduction: An Introduction to the LLL + Algorithm and Its Applications" CRC Press, 2012. 
+
+
+# Examples
+```jldoctest
+julia> H= [1 2; 3 4];islllreduced(H)
+false
+
+julia> B,T=lll(H);islllreduced(B)
+true
+
+```
+"""
+function islllreduced(B)
+# follows p 56 of Bremner
+    Td = eltype(B)
+    Xs,M = gso(B)
+    isSizeReduced = all(abs.((M-I)[:])*2 .<= one(Td))
+    n = size(B,2)
+    xiSq = zeros(float(Td),n)
+    for i=1:n
+        xiSq[i] = Xs[:,i]'*Xs[:,i]
+    end
+    deltas = zeros(float(Td),n-1)
+    for i=2:n
+        deltas[i-1] = xiSq[i]/xiSq[i-1]+M[i-1,i]^2
+    end
+    if isSizeReduced && minimum(deltas)>=3/4
+        return true
+    else
+        return false
+    end
+end
+
+"""
+    issizereduced(B)
+
+Determine if the matrix B is size reduced or not.
+
+# Examples
+```jldoctest
+julia> H= [1 2; 3 4];issizereduced(H)
+false
+
+julia> B,T = lll(H);issizereduced(B)
+true
+
+```
+"""
+function issizereduced(B)
+    # to-do: replace gso with QR as on p 148 of LLL survey book
+    Xs,M = gso(B)
+    return all(abs.((M-I)[:])*2 .<= one(eltype(B)))
+end
+
+
+"""
+    Q,R = gso(H)
+
+Find Gram-Schmidt orthogonal basis; H=Q*R
+This is not quite a QR decomposition.
+
+# Examples
+```jldoctest
+julia> H = [1 2; 3 4]; Q,R = gso(H)
+
+```
+"""
+function gso(H::AbstractArray{Td,2}) where {Td}
+# This is classic GS; see comparison between classic and modified in
+# http://www.cis.upenn.edu/~cis610/Gram-Schmidt-Bjorck.pdf
+
+# See also Fig 3.1 of "Lattice Basis Reduction: An Introduction to the LLL
+# Algorithm and Its Applications" by Murray R. Bremner, CRC Press, 2012.
+
+    Q = float.(H) # Xs
+    n = size(H,2)
+    R = Matrix{float(Td)}(I,n,n) # M or μ
+
+    for i=1:n
+        for j=1:i-1
+            R[j,i] = H[:,i]'*Q[:,j]/(Q[:,j]'*Q[:,j])
+            Q[:,i]-= R[j,i]*Q[:,j]
+        end
+    end
+    return Q,R
+end
diff --git a/src/vblast.jl b/src/vblast.jl
index 4daaf2f..664561c 100644
--- a/src/vblast.jl
+++ b/src/vblast.jl
@@ -1,17 +1,32 @@
+"""
+    W,P,B = vblast(H)
+
+Find a VBLAST decomposition of `H` such that `H = pinv(W)*B*P'` or `B =
+W*H*P`. Here `P` is a permutation matrix, `B` is lower triangular with ones
+on the diagonal, and `W` has orthogonal rows. 
+ + W,P,B = vblast(H,mu) + +If an SNR argument `mu` is passed in, a regularized ("MMSE") decomposition +is done, with the result that `W` will no longer have orthogonal rows and +`B` is no longer lower triangular. + + +# Examples +```jldoctest +julia> H= [1. 2; 3 4];W,_ = vblast(H); W +2×2 Array{Float64,2}: + 1.5 -0.5 + 0.1 0.3 + +julia> H= BigFloat.([1.5 2; 3 4]) .+ 2im; W,_= vblast(H); W +2×2 Array{Complex{BigFloat},2}: + -2.0+3.0im 2.0-1.5im + 0.0779221-0.103896im 0.155844-0.103896im + +``` +""" function vblast(H::Array{T,2},mu=Inf) where {T} -# (W,P,B) = vblast(H) -# Returns a VBLAST matrix decomposition of H such that -# H = pinv(W)*B*P' or B = W*H*P. Here P is a permutation matrix, B is -# lower triangular with ones on the diagonal, and W has orthogonal rows. -# (W,P,B) = vblast(H,mu) -# If an SNR argument mu is passed in, a regularized ("MMSE") -# decomposition is done, with the result that W will no longer have -# orthogonal rows and B is no longer lower triangular. -# -# Example: For decoding with N receivers -# N=4;H = rand(N,N); include("vblast.jl");(W,P,B) = vblast(H) -# Example: For MMSE decoding with N receivers mu=rho/N -# (W,P,B) = vblast(H,rho/size(H,1)); (K,N) = size(H); diff --git a/test/runtests.jl b/test/runtests.jl index e8a8965..adae078 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -62,6 +62,9 @@ println("Testing Seysen on same $(N)x$(N) complex matrix...") println("Testing VBLAST on same $(N)x$(N) complex matrix...") @time (W,P,B) = vblast(H); @time (W,P,B) = vblast(H); +println("Testing Brun on real part of same $(N)x$(N) matrix...") +@time (B,T) = brun(real.(H)); +@time (B,T) = brun(real.(H)); # Test sphere decoder Ns = 100000; @@ -73,8 +76,8 @@ C = [-1,1]; Z = rand(1:2,N,Ns); X = C[Z]; Y = H*X+NN; -@time Xt = hard_sphere(Y,H,2); -@time Xt = hard_sphere(Y,H,2); +@time Xt = hardsphere(Y,H,2); +@time Xt = hardsphere(Y,H,2); errRate = sum(abs.(X-Xt))/Ns; println("Error Rate is $(errRate). It should be zero or very small.\n")