@@ -291,54 +291,6 @@ pub trait CloneableVector<T> {
 impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
     /// Returns a copy of `v`.
     #[inline]
-    #[cfg(stage0)]
-    fn to_owned(&self) -> ~[T] {
-        use RawVec = core::raw::Vec;
-        use num::{CheckedAdd, CheckedMul};
-        use option::Expect;
-
-        let len = self.len();
-        let data_size = len.checked_mul(&mem::size_of::<T>());
-        let data_size = data_size.expect("overflow in to_owned()");
-        let size = mem::size_of::<RawVec<()>>().checked_add(&data_size);
-        let size = size.expect("overflow in to_owned()");
-
-        unsafe {
-            // this should pass the real required alignment
-            let ret = exchange_malloc(size) as *mut RawVec<()>;
-
-            (*ret).fill = len * mem::nonzero_size_of::<T>();
-            (*ret).alloc = len * mem::nonzero_size_of::<T>();
-
-            // Be careful with the following loop. We want it to be optimized
-            // to a memcpy (or something similarly fast) when T is Copy. LLVM
-            // is easily confused, so any extra operations during the loop can
-            // prevent this optimization.
-            let mut i = 0;
-            let p = &mut (*ret).data as *mut _ as *mut T;
-            try_finally(
-                &mut i, (),
-                |i, ()| while *i < len {
-                    mem::move_val_init(
-                        &mut (*p.offset(*i as int)),
-                        self.unsafe_ref(*i).clone());
-                    *i += 1;
-                },
-                |i| if *i < len {
-                    // we must be failing, clean up after ourselves
-                    for j in range(0, *i as int) {
-                        ptr::read(&*p.offset(j));
-                    }
-                    // FIXME: #13994 (should pass align and size here)
-                    deallocate(ret as *mut u8, 0, 8);
-                });
-            mem::transmute(ret)
-        }
-    }
-
-    /// Returns a copy of `v`.
-    #[inline]
-    #[cfg(not(stage0))]
     fn to_owned(&self) -> ~[T] {
         use RawVec = core::raw::Vec;
         use num::{CheckedAdd, CheckedMul};
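
Note: the deleted body checks its size arithmetic before allocating. The element storage in bytes, plus the `RawVec` header, is computed with `checked_mul`/`checked_add` so a huge `len` cannot wrap around to a too-small allocation. A minimal sketch of the same guard in modern Rust (the function name is hypothetical; the `expect` messages mirror the originals):

use std::mem;

// Hypothetical helper illustrating the overflow guard: compute
// header + len * size_of::<T>() in bytes, panicking on overflow
// rather than requesting a buffer that is too small.
fn alloc_size<T>(len: usize, header_size: usize) -> usize {
    let data_size = len
        .checked_mul(mem::size_of::<T>())
        .expect("overflow in to_owned()");
    header_size
        .checked_add(data_size)
        .expect("overflow in to_owned()")
}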
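The `try_finally` call in the removed code is the subtle part: if any `clone()` panics partway through the loop, the cleanup closure drops the elements initialized so far (via `ptr::read`) and frees the allocation, so nothing leaks. A sketch of the same pattern in modern Rust, with a drop guard standing in for the long-gone `try_finally` (names here are illustrative, not the library's):

use std::{mem, ptr};

// Illustrative only: clone a slice into fresh storage with the same
// panic-safety guarantee the removed code got from `try_finally`.
fn clone_all<T: Clone>(src: &[T]) -> Box<[T]> {
    let mut buf: Vec<T> = Vec::with_capacity(src.len());
    let p = buf.as_mut_ptr();

    // Plays the role of the old cleanup closure: on unwind, drop
    // exactly the elements that were already written.
    struct Guard<T> {
        p: *mut T,
        written: usize,
    }
    impl<T> Drop for Guard<T> {
        fn drop(&mut self) {
            for j in 0..self.written {
                // SAFETY: slots 0..written are initialized.
                unsafe { ptr::drop_in_place(self.p.add(j)) };
            }
        }
    }

    let mut guard = Guard { p, written: 0 };
    for item in src {
        // A panic inside clone() unwinds through `guard`, which drops
        // the clones made so far; `buf` then frees the raw storage.
        unsafe { ptr::write(p.add(guard.written), item.clone()) };
        guard.written += 1;
    }

    // Every slot is initialized: disarm the guard, hand ownership back.
    let written = guard.written;
    mem::forget(guard);
    unsafe { buf.set_len(written) };
    buf.into_boxed_slice()
}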