auto merge of #12029 : zkamsler/rust/merge-sort-allocations, r=huonw
This pull request:
1) Changes the initial insertion sort to be in-place, and defers allocation of the working set until a merge is actually needed.
2) Increases the maximum run length sorted by insertion sort from 8 to 32 elements. This increases the size of vectors that can be sorted without allocating, and removes two merge passes (32 is 4 times 8, hence two fewer width-doubling passes). It seemed to be the sweet spot in the benchmarks that I ran. A sketch of the scheme follows this list.

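To make the scheme concrete, here is a minimal sketch in present-day Rust syntax. It is an illustration only: the patch itself (see the diff below) works on `~[T]` with raw pointers and shallow copies, while the `Clone` bound, `MAX_INSERTION` constant, and function names here are simplifications.

```rust
use std::cmp::Ordering;

// Illustrative threshold, mirroring the 32-element run length chosen above.
const MAX_INSERTION: usize = 32;

// In-place, stable insertion sort: touches no heap memory.
fn insertion_sort<T>(v: &mut [T], compare: &impl Fn(&T, &T) -> Ordering) {
    for i in 1..v.len() {
        let mut j = i;
        // Strict `Less` (not `<=`) keeps equal elements in order: stability.
        while j > 0 && compare(&v[j], &v[j - 1]) == Ordering::Less {
            v.swap(j, j - 1);
            j -= 1;
        }
    }
}

// Stable merge sort that allocates scratch space only once the input is
// long enough to actually need a merge pass.
fn merge_sort<T: Clone>(v: &mut [T], compare: impl Fn(&T, &T) -> Ordering) {
    let len = v.len();
    if len <= MAX_INSERTION {
        insertion_sort(v, &compare); // short vector: zero allocations
        return;
    }

    // Step 1: sort each MAX_INSERTION-sized run in place.
    for run in v.chunks_mut(MAX_INSERTION) {
        insertion_sort(run, &compare);
    }

    // Step 2: bottom-up merging, doubling the run width each pass.
    // (The real patch ping-pongs two raw buffers; cloning keeps this safe.)
    let mut scratch: Vec<T> = Vec::with_capacity(len);
    let mut width = MAX_INSERTION;
    while width < len {
        for start in (0..len).step_by(2 * width) {
            let mid = (start + width).min(len);
            let end = (start + 2 * width).min(len);
            let (mut i, mut j) = (start, mid);
            scratch.clear();
            while i < mid && j < end {
                // Take from the right run only on strict `Less`, so ties
                // favour the left run and stability is preserved.
                if compare(&v[j], &v[i]) == Ordering::Less {
                    scratch.push(v[j].clone());
                    j += 1;
                } else {
                    scratch.push(v[i].clone());
                    i += 1;
                }
            }
            while i < mid { scratch.push(v[i].clone()); i += 1; }
            while j < end { scratch.push(v[j].clone()); j += 1; }
            v[start..end].clone_from_slice(&scratch);
        }
        width *= 2;
    }
}
```

With `MAX_INSERTION = 32`, a call like `merge_sort(&mut v, |a, b| a.cmp(b))` on a vector of up to 32 elements never touches the allocator; longer inputs pay for the scratch buffer once.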
Here are the results of some benchmarks. Note that they sort u64s, so types that are more expensive to compare or copy may behave differently.
Before changes:
```
test vec::bench::sort_random_large      bench:    719753 ns/iter (+/- 130173) = 111 MB/s
test vec::bench::sort_random_medium     bench:      4726 ns/iter (+/- 742) = 169 MB/s
test vec::bench::sort_random_small      bench:       344 ns/iter (+/- 76) = 116 MB/s
test vec::bench::sort_sorted            bench:    437244 ns/iter (+/- 70043) = 182 MB/s
```

Deferred allocation (8 element insertion sort):
```
test vec::bench::sort_random_large      bench:    702630 ns/iter (+/- 88158) = 113 MB/s
test vec::bench::sort_random_medium     bench:      4529 ns/iter (+/- 497) = 176 MB/s
test vec::bench::sort_random_small      bench:       185 ns/iter (+/- 49) = 216 MB/s
test vec::bench::sort_sorted            bench:    425853 ns/iter (+/- 60907) = 187 MB/s
```

Deferred allocation (16 element insertion sort):
```
test vec::bench::sort_random_large      bench:    692783 ns/iter (+/- 165837) = 115 MB/s
test vec::bench::sort_random_medium     bench:      4434 ns/iter (+/- 722) = 180 MB/s
test vec::bench::sort_random_small      bench:       187 ns/iter (+/- 38) = 213 MB/s
test vec::bench::sort_sorted            bench:    393783 ns/iter (+/- 85548) = 203 MB/s
```

Deferred allocation (32 element insertion sort):
```
test vec::bench::sort_random_large      bench:    682556 ns/iter (+/- 131008) = 117 MB/s
test vec::bench::sort_random_medium     bench:      4370 ns/iter (+/- 1369) = 183 MB/s
test vec::bench::sort_random_small      bench:       179 ns/iter (+/- 32) = 223 MB/s
test vec::bench::sort_sorted            bench:    358353 ns/iter (+/- 65423) = 223 MB/s
```

Deferred allocation (64 element insertion sort):
```
test vec::bench::sort_random_large      bench:    712040 ns/iter (+/- 132454) = 112 MB/s
test vec::bench::sort_random_medium     bench:      4425 ns/iter (+/- 784) = 180 MB/s
test vec::bench::sort_random_small      bench:       179 ns/iter (+/- 81) = 223 MB/s
test vec::bench::sort_sorted            bench:    317812 ns/iter (+/- 62675) = 251 MB/s
```
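
An aside on reading the numbers: the MB/s column is just bytes sorted per iteration over time per iteration, and from those figures the small/medium/large benches appear to sort 5, 100, and 10,000 u64s respectively. Below is a rough modern re-creation of that arithmetic, since the 2014 `BenchHarness` API no longer exists; `bench_sort` and the multiplier constant are illustrative stand-ins, not the original harness.

```rust
use std::hint::black_box;
use std::time::Instant;

// Times `Vec::sort` on `n` pseudo-shuffled u64s and prints the same
// ns/iter and MB/s figures as the old harness. The odd-constant multiply
// is a crude stand-in for the original `rng.gen_vec` input.
fn bench_sort(n: usize) {
    let data: Vec<u64> = (0..n as u64)
        .map(|i| i.wrapping_mul(0x9E37_79B9_7F4A_7C15))
        .collect();
    let iters = 1_000u32;
    let start = Instant::now();
    for _ in 0..iters {
        let mut v = data.clone();
        v.sort();
        black_box(&v);
    }
    let ns_per_iter = start.elapsed().as_nanos() as f64 / iters as f64;
    let bytes = (n * std::mem::size_of::<u64>()) as f64;
    println!("{n} elems: {ns_per_iter:.0} ns/iter = {:.0} MB/s",
             bytes * 1000.0 / ns_per_iter);
}

fn main() {
    for &n in &[5usize, 100, 10_000] {
        bench_sort(n);
    }
}
```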

This is the best I could manage with the basic merge sort while keeping the invariant that the original vector must contain each element exactly once when the comparison function is called. If one is not married to a stable sort, an in-place O(n log n) sorting algorithm may perform better in some cases.
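
To see why the exactly-once invariant matters: the comparison can `fail!` (panic, in today's terms), and the resulting unwinding runs destructors over the vector, so an element shallow-copied into the scratch buffer must not also be dropped through `v`. A present-day sketch of the property as a test, with illustrative `Counted`/`DROPS` names; it exercises std's stable `sort_by`, which upholds the same guarantee:

```rust
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};

static DROPS: AtomicUsize = AtomicUsize::new(0);

// An element whose destructor we can count.
struct Counted(u32);

impl Drop for Counted {
    fn drop(&mut self) {
        DROPS.fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    let result = catch_unwind(AssertUnwindSafe(|| {
        let mut v: Vec<Counted> = (0..100u32).rev().map(Counted).collect();
        let mut calls = 0;
        v.sort_by(|a, b| {
            calls += 1;
            // Simulate a comparison that fails partway through the sort,
            // while elements may be staged in scratch storage.
            if calls == 50 {
                panic!("comparator failed");
            }
            a.0.cmp(&b.0)
        });
    }));
    assert!(result.is_err());
    // The sort was interrupted, yet every element ran its destructor
    // exactly once: no element was live in both `v` and the scratch.
    assert_eq!(DROPS.load(Ordering::SeqCst), 100);
}
```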

for #12011
cc @huonw
bors committed Feb 7, 2014 (2 parents: 7d7a060 + cebe5e8; commit 1fd2d77)
Showing 1 changed file: src/libstd/vec.rs, with 104 additions and 5 deletions.
@@ -1812,12 +1812,70 @@ impl<T:Eq> OwnedEqVector<T> for ~[T] {
     }
 }
 
+fn insertion_sort<T>(v: &mut [T], compare: |&T, &T| -> Ordering) {
+    let len = v.len() as int;
+    let buf_v = v.as_mut_ptr();
+
+    // 1 <= i < len;
+    for i in range(1, len) {
+        // j satisfies: 0 <= j <= i;
+        let mut j = i;
+        unsafe {
+            // `i` is in bounds.
+            let read_ptr = buf_v.offset(i) as *T;
+
+            // find where to insert, we need to do strict <,
+            // rather than <=, to maintain stability.
+
+            // 0 <= j - 1 < len, so .offset(j - 1) is in bounds.
+            while j > 0 &&
+                    compare(&*read_ptr, &*buf_v.offset(j - 1)) == Less {
+                j -= 1;
+            }
+
+            // shift everything to the right, to make space to
+            // insert this value.
+
+            // j + 1 could be `len` (for the last `i`), but in
+            // that case, `i == j` so we don't copy. The
+            // `.offset(j)` is always in bounds.
+
+            if i != j {
+                let tmp = ptr::read_ptr(read_ptr);
+                ptr::copy_memory(buf_v.offset(j + 1),
+                                 buf_v.offset(j),
+                                 (i - j) as uint);
+                ptr::copy_nonoverlapping_memory(buf_v.offset(j),
+                                                &tmp as *T,
+                                                1);
+                cast::forget(tmp);
+            }
+        }
+    }
+}
+
 fn merge_sort<T>(v: &mut [T], compare: |&T, &T| -> Ordering) {
     // warning: this wildly uses unsafe.
-    static INSERTION: uint = 8;
+    static BASE_INSERTION: uint = 32;
+    static LARGE_INSERTION: uint = 16;
+
+    // FIXME #12092: smaller insertion runs seems to make sorting
+    // vectors of large elements a little faster on some platforms,
+    // but hasn't been tested/tuned extensively
+    let insertion = if size_of::<T>() <= 16 {
+        BASE_INSERTION
+    } else {
+        LARGE_INSERTION
+    };
 
     let len = v.len();
 
+    // short vectors get sorted in-place via insertion sort to avoid allocations
+    if len <= insertion {
+        insertion_sort(v, compare);
+        return;
+    }
+
     // allocate some memory to use as scratch memory, we keep the
     // length 0 so we can keep shallow copies of the contents of `v`
     // without risking the dtors running on an object twice if
@@ -1837,9 +1895,9 @@ fn merge_sort<T>(v: &mut [T], compare: |&T, &T| -> Ordering) {
     // We could hardcode the sorting comparisons here, and we could
     // manipulate/step the pointers themselves, rather than repeatedly
     // .offset-ing.
-    for start in range_step(0, len, INSERTION) {
-        // start <= i <= len;
-        for i in range(start, cmp::min(start + INSERTION, len)) {
+    for start in range_step(0, len, insertion) {
+        // start <= i < len;
+        for i in range(start, cmp::min(start + insertion, len)) {
             // j satisfies: start <= j <= i;
             let mut j = i as int;
             unsafe {
@@ -1871,7 +1929,7 @@ fn merge_sort<T>(v: &mut [T], compare: |&T, &T| -> Ordering) {
     }
 
     // step 2. merge the sorted runs.
-    let mut width = INSERTION;
+    let mut width = insertion;
     while width < len {
         // merge the sorted runs of length `width` in `buf_dat` two at
         // a time, placing the result in `buf_tmp`.
@@ -4505,4 +4563,45 @@ mod bench {
         });
         bh.bytes = (v.len() * mem::size_of_val(&v[0])) as u64;
     }
+
+    type BigSortable = (u64,u64,u64,u64);
+
+    #[bench]
+    fn sort_big_random_small(bh: &mut BenchHarness) {
+        let mut rng = weak_rng();
+        bh.iter(|| {
+            let mut v: ~[BigSortable] = rng.gen_vec(5);
+            v.sort();
+        });
+        bh.bytes = 5 * mem::size_of::<BigSortable>() as u64;
+    }
+
+    #[bench]
+    fn sort_big_random_medium(bh: &mut BenchHarness) {
+        let mut rng = weak_rng();
+        bh.iter(|| {
+            let mut v: ~[BigSortable] = rng.gen_vec(100);
+            v.sort();
+        });
+        bh.bytes = 100 * mem::size_of::<BigSortable>() as u64;
+    }
+
+    #[bench]
+    fn sort_big_random_large(bh: &mut BenchHarness) {
+        let mut rng = weak_rng();
+        bh.iter(|| {
+            let mut v: ~[BigSortable] = rng.gen_vec(10000);
+            v.sort();
+        });
+        bh.bytes = 10000 * mem::size_of::<BigSortable>() as u64;
+    }
+
+    #[bench]
+    fn sort_big_sorted(bh: &mut BenchHarness) {
+        let mut v = vec::from_fn(10000u, |i| (i, i, i, i));
+        bh.iter(|| {
+            v.sort();
+        });
+        bh.bytes = (v.len() * mem::size_of_val(&v[0])) as u64;
+    }
 }
