diff --git a/CHANGELOG.md b/CHANGELOG.md
index 62dad742..aa8b5d39 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,17 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.5.3] - 2024-08-16
+
+### Fixed
+- Typo: `node_coorinates_unit_cube` -> `node_coordinates_unit_cube`.
+- Bug in `nullspace_linear_elasticity`.
+- Bug in `PVector` when working in split format.
+
+### Deprecated
+
+- `near_nullspace_linear_elasticity` in favor of `nullspace_linear_elasticity`.
+
 ## [0.5.2] - 2024-08-13
 
 ### Added
diff --git a/Project.toml b/Project.toml
index 49939155..73c8b770 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "PartitionedArrays"
 uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
 authors = ["Francesc Verdugo and contributors"]
-version = "0.5.2"
+version = "0.5.3"
 
 [deps]
 CircularArrays = "7a955b69-7140-5f4e-a0ed-f168c5e2e749"
diff --git a/src/PartitionedArrays.jl b/src/PartitionedArrays.jl
index 5575613c..eef27803 100644
--- a/src/PartitionedArrays.jl
+++ b/src/PartitionedArrays.jl
@@ -180,7 +180,9 @@ include("p_timer.jl")
 export laplacian_fdm
 export laplacian_fem
 export linear_elasticity_fem
-export node_coorinates_unit_cube
+export node_coordinates_unit_cube
+export nullspace_linear_elasticity
+export nullspace_linear_elasticity!
 export near_nullspace_linear_elasticity
 include("gallery.jl")
 
diff --git a/src/gallery.jl b/src/gallery.jl
index 231dcce1..f68fec9d 100644
--- a/src/gallery.jl
+++ b/src/gallery.jl
@@ -415,12 +415,12 @@ function node_to_dof_partition(node_partition,D)
     dof_partition
 end
 
-function node_coorinates_unit_cube(
+function node_coordinates_unit_cube(
     nodes_per_dir, # free (== interior) nodes
     parts_per_dir,
     parts,
     ;
-    split_format = Val(true),
+    split_format = Val(false),
     value_type::Type{Tv} = Float64,) where Tv
 
     function setup!(own_x,mynodes)
@@ -443,7 +443,12 @@ function node_coorinates_unit_cube(
     x
 end
 
-function near_nullspace_linear_elasticity(x,
+function near_nullspace_linear_elasticity(a...;b...)
+    @warn "near_nullspace_linear_elasticity is deprecated, use nullspace_linear_elasticity instead"
+    nullspace_linear_elasticity(a...;b...)
+end
+
+function nullspace_linear_elasticity(x,
     row_partition = node_to_dof_partition(partition(axes(x,1)),length(eltype(x)))
     )
     T = eltype(x)
@@ -461,10 +466,10 @@ function near_nullspace_linear_elasticity(x,
     dof_partition = row_partition
     split_format = Val(eltype(partition(x)) <: SplitVector)
     B = [ pzeros(Tv,dof_partition;split_format) for _ in 1:nb ]
-    near_nullspace_linear_elasticity!(B,x)
+    nullspace_linear_elasticity!(B,x)
 end
 
-function near_nullspace_linear_elasticity!(B,x)
+function nullspace_linear_elasticity!(B,x)
     D = length(eltype(x))
     if D == 1
         foreach(own_values(B[1])) do own_b
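The gallery changes above rename `node_coorinates_unit_cube` to `node_coordinates_unit_cube` and promote `near_nullspace_linear_elasticity` to `nullspace_linear_elasticity`/`nullspace_linear_elasticity!`. A minimal usage sketch of the renamed API, mirroring what the updated tests exercise; the debug backend (`DebugArray`) and the problem sizes are assumptions chosen for illustration:

```julia
using PartitionedArrays

parts_per_dir = (2,1,1)
nodes_per_dir = (4,4,4)
ranks = DebugArray(LinearIndices((prod(parts_per_dir),)))

# Distributed linear elasticity matrix from the gallery.
args = linear_elasticity_fem(nodes_per_dir,parts_per_dir,ranks)
A = psparse(args...) |> fetch

# Node coordinates of the unit cube, then the rigid-body modes used as
# a (near) nullspace, laid out with the column partition of A.
x = node_coordinates_unit_cube(nodes_per_dir,parts_per_dir,ranks)
B = nullspace_linear_elasticity(x,partition(axes(A,2)))
@assert all(b -> isa(b,PVector),B)

# Refresh the modes in place if the coordinates change.
nullspace_linear_elasticity!(B,x)
```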
diff --git a/src/p_vector.jl b/src/p_vector.jl
index ca7a7fac..ea75e03c 100644
--- a/src/p_vector.jl
+++ b/src/p_vector.jl
@@ -527,6 +527,7 @@ struct SplitVectorAssemblyCache{A,B,C,D}
     buffer_snd::C # NB
     buffer_rcv::C
     exchange_setup::D
+    reversed::Bool
 end
 function Base.reverse(a::SplitVectorAssemblyCache)
     SplitVectorAssemblyCache(
@@ -537,6 +538,7 @@ function Base.reverse(a::SplitVectorAssemblyCache)
         a.buffer_rcv,
         a.buffer_snd,
         a.exchange_setup,
+        !(a.reversed),
     )
 end
 function copy_cache(a::SplitVectorAssemblyCache)
@@ -549,23 +551,29 @@ function copy_cache(a::SplitVectorAssemblyCache)
         a.own_indices_rcv,
         buffer_snd,
         buffer_rcv,
-        a.exchange_setup
+        a.exchange_setup,
+        a.reversed,
     )
 end
 
 function p_vector_cache_impl(::Type{<:SplitVector},vector_partition,index_partition)
     neighbors_snd,neighbors_rcv= assembly_neighbors(index_partition)
     indices_snd,indices_rcv = assembly_local_indices(index_partition,neighbors_snd,neighbors_rcv)
-    map(indices_snd,indices_rcv,index_partition) do ids_snd,ids_rcv,myids
+    ghost_indices_snd = map(indices_snd) do ids
+        JaggedArray(copy(ids.data),ids.ptrs)
+    end
+    own_indices_rcv = map(indices_rcv) do ids
+        JaggedArray(copy(ids.data),ids.ptrs)
+    end
+    foreach(ghost_indices_snd,own_indices_rcv,index_partition) do ids_snd,ids_rcv,myids
         map_local_to_ghost!(ids_snd.data,myids)
         map_local_to_own!(ids_rcv.data,myids)
     end
-    ghost_indices_snd = indices_snd
-    own_indices_rcv = indices_rcv
-    buffers_snd,buffers_rcv = map(assembly_buffers,vector_partition,indices_snd,indices_rcv) |> tuple_of_arrays
+    buffers_snd,buffers_rcv = map(assembly_buffers,vector_partition,ghost_indices_snd,own_indices_rcv) |> tuple_of_arrays
     graph = ExchangeGraph(neighbors_snd,neighbors_rcv)
     exchange_setup = setup_exchange(buffers_rcv,buffers_snd,graph)
-    SplitVectorAssemblyCache(neighbors_snd,neighbors_rcv,ghost_indices_snd,own_indices_rcv,buffers_snd,buffers_rcv,exchange_setup)
+    reversed = false
+    SplitVectorAssemblyCache(neighbors_snd,neighbors_rcv,ghost_indices_snd,own_indices_rcv,buffers_snd,buffers_rcv,exchange_setup,reversed)
 end
 
 function p_vector_cache_impl(::Type{<:SplitVector{<:JaggedArray}},vector_partition,index_partition)
@@ -610,6 +618,7 @@ function assemble_impl!(f,vector_partition,cache::JaggedArrayAssemblyCache)
 end
 
 function assemble_impl!(f,vector_partition,cache::SplitVectorAssemblyCache)
+    reversed = cache.reversed
     ghost_indices_snd=cache.ghost_indices_snd
     own_indices_rcv=cache.own_indices_rcv
     neighbors_snd=cache.neighbors_snd
@@ -618,7 +627,11 @@ function assemble_impl!(f,vector_partition,cache::SplitVectorAssemblyCache)
     buffer_rcv=cache.buffer_rcv
     exchange_setup=cache.exchange_setup
     foreach(vector_partition,ghost_indices_snd,buffer_snd) do values,ghost_indices_snd,buffer_snd
-        ghost_vals = values.blocks.ghost
+        if reversed
+            ghost_vals = values.blocks.own
+        else
+            ghost_vals = values.blocks.ghost
+        end
         for (p,hid) in enumerate(ghost_indices_snd.data)
             buffer_snd.data[p] = ghost_vals[hid]
         end
@@ -629,7 +642,11 @@ function assemble_impl!(f,vector_partition,cache::SplitVectorAssemblyCache)
     @fake_async begin
         wait(t)
         foreach(vector_partition,own_indices_rcv,buffer_rcv) do values,own_indices_rcv,buffer_rcv
-            own_vals = values.blocks.own
+            if reversed
+                own_vals = values.blocks.ghost
+            else
+                own_vals = values.blocks.own
+            end
             for (p,oid) in enumerate(own_indices_rcv.data)
                 own_vals[oid] = f(own_vals[oid],buffer_rcv.data[p])
             end
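The new `reversed` flag lets a reversed `SplitVectorAssemblyCache` send from the own block and write into the ghost block instead of the other way around, which is what the own-to-ghost exchange of a split-format `PVector` needs. A minimal sketch of the two exchange directions through the public `assemble!`/`consistent!` entry points; the debug backend and the Laplacian example are assumptions chosen for illustration:

```julia
using PartitionedArrays

parts_per_dir = (2,2)
nodes_per_dir = (8,8)
ranks = DebugArray(LinearIndices((prod(parts_per_dir),)))

# Any gallery matrix provides a column partition that carries ghost entries.
args = laplacian_fdm(nodes_per_dir,parts_per_dir,ranks)
A = psparse(args...) |> fetch

# A PVector stored in split format (separate own and ghost blocks).
v = pzeros(Float64,partition(axes(A,2));split_format=Val(true))
foreach(own_v -> own_v .= 1.0, own_values(v))

consistent!(v) |> wait  # own -> ghost: goes through reverse(cache), i.e. the reversed branches above
assemble!(v) |> wait    # ghost -> own: forward cache, reversed == false
```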
diff --git a/test/gallery_tests.jl b/test/gallery_tests.jl
index 07fb9a13..20c4905a 100644
--- a/test/gallery_tests.jl
+++ b/test/gallery_tests.jl
@@ -32,20 +32,44 @@ function gallery_tests(distribute,parts_per_dir)
     @test isa(y,PVector)
     A = psparse(SparseMatrixCSR{1,Float64,Int32},args...) |> fetch
     A |> centralize |> display
-    Y = A*pones(axes(A,2))
+    y = A*pones(axes(A,2))
     @test isa(y,PVector)
 
     args = linear_elasticity_fem(nodes_per_dir,parts_per_dir,ranks)
     A = psparse(args...) |> fetch
     A |> centralize |> display
-    Y = A*pones(axes(A,2))
+    y = A*pones(axes(A,2))
     @test isa(y,PVector)
 
-    x = node_coorinates_unit_cube(nodes_per_dir,parts_per_dir,ranks)
-    B = near_nullspace_linear_elasticity(x)
+    x = node_coordinates_unit_cube(nodes_per_dir,parts_per_dir,ranks)
+    B = nullspace_linear_elasticity(x)
     @test isa(B[1],PVector)
 
-    B = near_nullspace_linear_elasticity(x,partition(axes(A,2)))
+    y = A*pones(axes(A,2))
+    @test isa(y,PVector)
+    B = nullspace_linear_elasticity(x,partition(axes(A,2)))
     @test isa(B[1],PVector)
+    y = A*pones(axes(A,2))
+    @test isa(y,PVector)
+    y = A*B[1]
+    @test isa(y,PVector)
+    nullspace_linear_elasticity!(B,x)
+    y = A*B[1]
+    @test isa(y,PVector)
+
+    x = node_coordinates_unit_cube(nodes_per_dir,parts_per_dir,ranks,split_format=true)
+    B = nullspace_linear_elasticity(x)
+    @test isa(B[1],PVector)
+    y = A*pones(axes(A,2))
+    @test isa(y,PVector)
+    B = nullspace_linear_elasticity(x,partition(axes(A,2)))
+    @test isa(B[1],PVector)
+    y = A*pones(axes(A,2))
+    @test isa(y,PVector)
+    y = A*B[1]
+    @test isa(y,PVector)
+    nullspace_linear_elasticity!(B,x)
+    y = A*B[1]
+    @test isa(y,PVector)
 
 end
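Aside on the shim in `gallery.jl`: it uses a plain forward-and-warn deprecation. A self-contained sketch of the same pattern with hypothetical names (`old_name`/`new_name`, not part of this patch):

```julia
# Hypothetical stand-ins for a renamed function.
new_name(x) = 2 .* x

function old_name(args...;kwargs...)
    # Warn, then forward all positional and keyword arguments unchanged.
    @warn "old_name is deprecated, use new_name instead"
    new_name(args...;kwargs...)
end

old_name([1,2,3])  # warns, then returns [2,4,6]
```

`Base.@deprecate old_name(x) new_name(x)` would give the same forwarding, but its message only shows up when Julia runs with `--depwarn=yes`; the explicit `@warn` keeps the notice visible to users by default.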