@@ -31,7 +31,7 @@ use smallvec::SmallVec;
 use std::{
     iter,
     mem::{self},
-    ptr::{self, NonNull},
+    ptr::NonNull,
     sync::{atomic::Ordering, Arc},
 };
 use thiserror::Error;
@@ -405,17 +405,13 @@ impl Global {
         // Platform validation requires that the staging buffer always be
         // freed, even if an error occurs. All paths from here must call
         // `device.pending_writes.consume`.
-        let (staging_buffer, staging_buffer_ptr) = StagingBuffer::new(device, data_size)?;
+        let mut staging_buffer = StagingBuffer::new(device, data_size)?;
         let mut pending_writes = device.pending_writes.lock();
         let pending_writes = pending_writes.as_mut().unwrap();
 
-        let staging_buffer = unsafe {
+        let staging_buffer = {
             profiling::scope!("copy");
-            ptr::copy_nonoverlapping(
-                data.as_ptr(),
-                staging_buffer_ptr.as_ptr(),
-                data_size.get() as usize,
-            );
+            staging_buffer.write(data);
             staging_buffer.flush()
         };
 
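The hunk above is the core of the change: instead of copying through a separately returned mapped pointer with `ptr::copy_nonoverlapping`, the caller uses a safe `write` method on the staging buffer itself, and the surrounding `unsafe` block disappears. As a rough sketch of the pattern (these are not wgpu's actual types; the stand-in below owns a plain heap allocation where the real `StagingBuffer` wraps driver-mapped memory), the encapsulation could look like this:

```rust
use std::ptr::NonNull;

/// Stand-in for wgpu's staging buffer. The real type wraps driver-mapped
/// GPU memory; here a heap allocation models the mapped range.
struct StagingBuffer {
    backing: Box<[u8]>, // keeps the "mapping" alive
    ptr: NonNull<u8>,   // the raw pointer the old code passed around bare
}

/// Returned by `flush`; it has no write methods, so post-flush writes are
/// ruled out by the type system rather than by convention.
struct FlushedStagingBuffer {
    backing: Box<[u8]>,
}

impl StagingBuffer {
    fn new(size: usize) -> Self {
        let mut backing = vec![0u8; size].into_boxed_slice();
        // The heap allocation does not move when the Box is moved, so the
        // pointer stays valid for the lifetime of `backing`.
        let ptr = NonNull::new(backing.as_mut_ptr()).expect("allocation is non-null");
        StagingBuffer { backing, ptr }
    }

    /// Safe wrapper: the buffer owns the destination range and bounds-checks
    /// the copy, so callers no longer discharge any `unsafe` obligations.
    fn write(&mut self, data: &[u8]) {
        assert!(data.len() <= self.backing.len(), "write out of bounds");
        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr(), self.ptr.as_ptr(), data.len());
        }
    }

    /// Consuming `self` forbids further writes once the buffer is flushed.
    fn flush(self) -> FlushedStagingBuffer {
        FlushedStagingBuffer { backing: self.backing }
    }
}
```

Because the buffer owns both the pointer and its length, the bounds check lives in one place instead of being re-established at every call site.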
@@ -448,13 +448,14 @@ impl Global {
 
         let device = &queue.device;
 
-        let (staging_buffer, staging_buffer_ptr) = StagingBuffer::new(device, buffer_size)?;
+        let staging_buffer = StagingBuffer::new(device, buffer_size)?;
+        let ptr = unsafe { staging_buffer.ptr() };
 
         let fid = hub.staging_buffers.prepare(id_in);
         let id = fid.assign(Arc::new(staging_buffer));
         resource_log!("Queue::create_staging_buffer {id:?}");
 
-        Ok((id, staging_buffer_ptr))
+        Ok((id, ptr))
     }
 
     pub fn queue_write_staging_buffer<A: HalApi>(
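`queue_create_staging_buffer` still has to hand the raw mapped pointer across the API boundary to its caller, so one `unsafe` escape hatch remains rather than a tuple threaded through every path. The accessor's shape is inferred from the call site, not taken from the patch; continuing the stand-in from above, it would be something like:

```rust
impl StagingBuffer {
    /// # Safety
    /// The pointer is only valid while the buffer remains mapped, and the
    /// caller takes over the bounds and lifetime obligations that `write`
    /// would otherwise check internally.
    unsafe fn ptr(&self) -> NonNull<u8> {
        self.ptr
    }
}
```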
@@ -487,7 +484,7 @@ impl Global {
         // user. Platform validation requires that the staging buffer always
         // be freed, even if an error occurs. All paths from here must call
         // `device.pending_writes.consume`.
-        let staging_buffer = unsafe { staging_buffer.flush() };
+        let staging_buffer = staging_buffer.flush();
 
         let result = self.queue_write_staging_buffer_impl(
             &queue,
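With `flush` taking `self` by value, the `unsafe` wrapper around it is gone, and a write after flush becomes a compile error rather than a runtime hazard. A usage sketch against the stand-in type:

```rust
fn upload(data: &[u8]) -> FlushedStagingBuffer {
    let mut staging = StagingBuffer::new(data.len());
    staging.write(data);
    let flushed = staging.flush();
    // staging.write(data); // error[E0382]: borrow of moved value: `staging`
    flushed
}
```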
@@ -779,42 +776,34 @@ impl Global {
         // Platform validation requires that the staging buffer always be
         // freed, even if an error occurs. All paths from here must call
         // `device.pending_writes.consume`.
-        let (staging_buffer, staging_buffer_ptr) = StagingBuffer::new(device, stage_size)?;
+        let mut staging_buffer = StagingBuffer::new(device, stage_size)?;
 
         if stage_bytes_per_row == bytes_per_row {
             profiling::scope!("copy aligned");
             // Fast path if the data is already being aligned optimally.
-            unsafe {
-                ptr::copy_nonoverlapping(
-                    data.as_ptr().offset(data_layout.offset as isize),
-                    staging_buffer_ptr.as_ptr(),
-                    stage_size.get() as usize,
-                );
-            }
+            staging_buffer.write(&data[data_layout.offset as usize..]);
         } else {
             profiling::scope!("copy chunked");
             // Copy row by row into the optimal alignment.
             let copy_bytes_per_row = stage_bytes_per_row.min(bytes_per_row) as usize;
             for layer in 0..size.depth_or_array_layers {
                 let rows_offset = layer * block_rows_per_image;
-                for row in 0..height_blocks {
+                for row in rows_offset..rows_offset + height_blocks {
+                    let src_offset = data_layout.offset as u32 + row * bytes_per_row;
+                    let dst_offset = row * stage_bytes_per_row;
                     unsafe {
-                        ptr::copy_nonoverlapping(
-                            data.as_ptr().offset(
-                                data_layout.offset as isize
-                                    + (rows_offset + row) as isize * bytes_per_row as isize,
-                            ),
-                            staging_buffer_ptr.as_ptr().offset(
-                                (rows_offset + row) as isize * stage_bytes_per_row as isize,
-                            ),
+                        staging_buffer.write_with_offset(
+                            data,
+                            src_offset as isize,
+                            dst_offset as isize,
                             copy_bytes_per_row,
-                        );
+                        )
                     }
                 }
             }
         }
 
-        let staging_buffer = unsafe { staging_buffer.flush() };
+        let staging_buffer = staging_buffer.flush();
 
         let regions = (0..array_layer_count).map(|rel_array_layer| {
             let mut texture_base = dst_base.clone();
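In the chunked texture path the per-row offset arithmetic is hoisted into named `src_offset`/`dst_offset` locals, so the only obligation left inside the `unsafe` block is that those offsets are in range. `write_with_offset` presumably stays `unsafe` to keep per-row bounds checks out of the hot copy loop; a sketch on the stand-in type, with the signature inferred from the call site:

```rust
impl StagingBuffer {
    /// Copies `len` bytes from `data` at `src_offset` into the buffer at
    /// `dst_offset`.
    ///
    /// # Safety
    /// The caller must ensure `src_offset + len` is within `data` and
    /// `dst_offset + len` is within the buffer; the checks are skipped so
    /// the row-by-row copy loop stays cheap.
    unsafe fn write_with_offset(
        &mut self,
        data: &[u8],
        src_offset: isize,
        dst_offset: isize,
        len: usize,
    ) {
        std::ptr::copy_nonoverlapping(
            data.as_ptr().offset(src_offset),
            self.ptr.as_ptr().offset(dst_offset),
            len,
        );
    }
}
```

Note that the loop rewrite is behavior-preserving: iterating `row` over `rows_offset..rows_offset + height_blocks` folds the old `(rows_offset + row)` term into the index itself.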