diff --git a/src/libgreen/context.rs b/src/libgreen/context.rs index 5916557abdb81..4e626b3bec778 100644 --- a/src/libgreen/context.rs +++ b/src/libgreen/context.rs @@ -12,10 +12,9 @@ use std::libc::c_void; use std::uint; use std::cast::{transmute, transmute_mut_unsafe, transmute_region, transmute_mut_region}; +use stack::Stack; use std::unstable::stack; -use stack::StackSegment; - // FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing // SSE regs. It would be marginally better not to do this. In C++ we // use an attribute on a struct. @@ -41,7 +40,7 @@ impl Context { } /// Create a new context that will resume execution by running proc() - pub fn new(start: proc(), stack: &mut StackSegment) -> Context { + pub fn new(start: proc(), stack: &mut Stack) -> Context { // The C-ABI function that is the task entry point // // Note that this function is a little sketchy. We're taking a @@ -79,6 +78,7 @@ impl Context { // be passed to the spawn function. Another unfortunate // allocation let start = ~start; + initialize_call_frame(&mut *regs, task_start_wrapper as *c_void, unsafe { transmute(&*start) }, diff --git a/src/libgreen/coroutine.rs b/src/libgreen/coroutine.rs index 7bc5d0accfe3b..c001d40a2465d 100644 --- a/src/libgreen/coroutine.rs +++ b/src/libgreen/coroutine.rs @@ -14,7 +14,7 @@ use std::rt::env; use context::Context; -use stack::{StackPool, StackSegment}; +use stack::{StackPool, Stack}; /// A coroutine is nothing more than a (register context, stack) pair. pub struct Coroutine { @@ -24,7 +24,7 @@ pub struct Coroutine { /// /// Servo needs this to be public in order to tell SpiderMonkey /// about the stack bounds. - current_stack_segment: StackSegment, + current_stack_segment: Stack, /// Always valid if the task is alive and not running. 
saved_context: Context @@ -39,7 +39,7 @@ impl Coroutine { Some(size) => size, None => env::min_stack() }; - let mut stack = stack_pool.take_segment(stack_size); + let mut stack = stack_pool.take_stack(stack_size); let initial_context = Context::new(start, &mut stack); Coroutine { current_stack_segment: stack, @@ -49,7 +49,7 @@ impl Coroutine { pub fn empty() -> Coroutine { Coroutine { - current_stack_segment: StackSegment::new(0), + current_stack_segment: unsafe { Stack::dummy_stack() }, saved_context: Context::empty() } } @@ -57,6 +57,6 @@ impl Coroutine { /// Destroy coroutine and try to reuse std::stack segment. pub fn recycle(self, stack_pool: &mut StackPool) { let Coroutine { current_stack_segment, .. } = self; - stack_pool.give_segment(current_stack_segment); + stack_pool.give_stack(current_stack_segment); } } diff --git a/src/libgreen/stack.rs b/src/libgreen/stack.rs index 7e6dd02dd67e0..bc2c199b83ffd 100644 --- a/src/libgreen/stack.rs +++ b/src/libgreen/stack.rs @@ -8,46 +8,113 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::vec; -use std::libc::{c_uint, uintptr_t}; +use std::rt::env::max_cached_stacks; +use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable, + MapNonStandardFlags, MapVirtual}; +use std::libc; -pub struct StackSegment { - priv buf: ~[u8], - priv valgrind_id: c_uint +/// A task's stack. The name "Stack" is a vestige of segmented stacks. +pub struct Stack { + priv buf: MemoryMap, + priv min_size: uint, + priv valgrind_id: libc::c_uint, } -impl StackSegment { - pub fn new(size: uint) -> StackSegment { - unsafe { - // Crate a block of uninitialized values - let mut stack = vec::with_capacity(size); - stack.set_len(size); +// Try to use MAP_STACK on platforms that support it (it's what we're doing +// anyway), but some platforms don't support it at all. 
For example, it appears +// that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always +// fails): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html +#[cfg(not(windows), not(target_os = "freebsd"))] +static STACK_FLAGS: libc::c_int = libc::MAP_STACK | libc::MAP_PRIVATE | + libc::MAP_ANON; +#[cfg(target_os = "freebsd")] +static STACK_FLAGS: libc::c_int = libc::MAP_PRIVATE | libc::MAP_ANON; +#[cfg(windows)] +static STACK_FLAGS: libc::c_int = 0; - let mut stk = StackSegment { - buf: stack, - valgrind_id: 0 - }; +impl Stack { + /// Allocate a new stack of `size`. If size = 0, this will fail. Use + /// `dummy_stack` if you want a zero-sized stack. + pub fn new(size: uint) -> Stack { + // Map in a stack. Eventually we might be able to handle stack + // allocation failure, which would fail to spawn the task. But there's + // not many sensible things to do on OOM. Failure seems fine (and is + // what the old stack allocation did). + let stack = match MemoryMap::new(size, [MapReadable, MapWritable, + MapNonStandardFlags(STACK_FLAGS)]) { + Ok(map) => map, + Err(e) => fail!("mmap for stack of size {} failed: {}", size, e) + }; - // XXX: Using the FFI to call a C macro. Slow - stk.valgrind_id = rust_valgrind_stack_register(stk.start(), stk.end()); - return stk; + // Change the last page to be inaccessible. This is to provide safety; + // when an FFI function overflows it will (hopefully) hit this guard + // page. It isn't guaranteed, but that's why FFI is unsafe. buf.data is + // guaranteed to be aligned properly. + if !protect_last_page(&stack) { + fail!("Could not memory-protect guard page. stack={:?}, errno={}", + stack, errno()); + } + + let mut stk = Stack { + buf: stack, + min_size: size, + valgrind_id: 0 + }; + + // XXX: Using the FFI to call a C macro. Slow + stk.valgrind_id = unsafe { + rust_valgrind_stack_register(stk.start(), stk.end()) + }; + return stk; + } + + /// Create a 0-length stack which starts (and ends) at 0. 
+ pub unsafe fn dummy_stack() -> Stack { + Stack { + buf: MemoryMap { data: 0 as *mut u8, len: 0, kind: MapVirtual }, + min_size: 0, + valgrind_id: 0 } } /// Point to the low end of the allocated stack pub fn start(&self) -> *uint { - self.buf.as_ptr() as *uint + self.buf.data as *uint } - /// Point one word beyond the high end of the allocated stack + /// Point one uint beyond the high end of the allocated stack pub fn end(&self) -> *uint { unsafe { - self.buf.as_ptr().offset(self.buf.len() as int) as *uint + self.buf.data.offset(self.buf.len as int) as *uint } } } -impl Drop for StackSegment { +#[cfg(unix)] +fn protect_last_page(stack: &MemoryMap) -> bool { + unsafe { + // This may seem backwards: the start of the segment is the last page? + // Yes! The stack grows from higher addresses (the end of the allocated + // block) to lower addresses (the start of the allocated block). + let last_page = stack.data as *libc::c_void; + libc::mprotect(last_page, page_size() as libc::size_t, + libc::PROT_NONE) != -1 + } +} + +#[cfg(windows)] +fn protect_last_page(stack: &MemoryMap) -> bool { + unsafe { + // see above + let last_page = stack.data as *mut libc::c_void; + let mut old_prot: libc::DWORD = 0; + libc::VirtualProtect(last_page, page_size() as libc::SIZE_T, + libc::PAGE_NOACCESS, + &mut old_prot as libc::LPDWORD) != 0 + } +} + +impl Drop for Stack { fn drop(&mut self) { unsafe { // XXX: Using the FFI to call a C macro. Slow @@ -56,20 +123,36 @@ impl Drop for StackSegment { } } -pub struct StackPool(()); +pub struct StackPool { + // Ideally this would be some datastructure that preserved ordering on + // Stack.min_size. 
+ priv stacks: ~[Stack], +} impl StackPool { - pub fn new() -> StackPool { StackPool(()) } + pub fn new() -> StackPool { + StackPool { + stacks: ~[], + } + } - pub fn take_segment(&self, min_size: uint) -> StackSegment { - StackSegment::new(min_size) + pub fn take_stack(&mut self, min_size: uint) -> Stack { + // Ideally this would be a binary search + match self.stacks.iter().position(|s| min_size <= s.min_size) { + Some(idx) => self.stacks.swap_remove(idx), + None => Stack::new(min_size) + } } - pub fn give_segment(&self, _stack: StackSegment) { + pub fn give_stack(&mut self, stack: Stack) { + if self.stacks.len() <= max_cached_stacks() { + self.stacks.push(stack) + } } } extern { - fn rust_valgrind_stack_register(start: *uintptr_t, end: *uintptr_t) -> c_uint; - fn rust_valgrind_stack_deregister(id: c_uint); + fn rust_valgrind_stack_register(start: *libc::uintptr_t, + end: *libc::uintptr_t) -> libc::c_uint; + fn rust_valgrind_stack_deregister(id: libc::c_uint); } diff --git a/src/libstd/libc.rs b/src/libstd/libc.rs index a398835824b94..d5f185880fad6 100644 --- a/src/libstd/libc.rs +++ b/src/libstd/libc.rs @@ -2863,6 +2863,7 @@ pub mod consts { pub static MAP_PRIVATE : c_int = 0x0002; pub static MAP_FIXED : c_int = 0x0010; pub static MAP_ANON : c_int = 0x1000; + pub static MAP_STACK : c_int = 0; pub static MAP_FAILED : *c_void = -1 as *c_void; diff --git a/src/libstd/os.rs b/src/libstd/os.rs index 1b55427fc2dbc..457d58ae46405 100644 --- a/src/libstd/os.rs +++ b/src/libstd/os.rs @@ -33,13 +33,13 @@ use container::Container; #[cfg(target_os = "macos")] use iter::range; use libc; -use libc::{c_char, c_void, c_int, size_t}; +use libc::{c_char, c_void, c_int}; use option::{Some, None}; use os; use prelude::*; use ptr; use str; -use to_str; +use fmt; use unstable::finally::Finally; use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst}; @@ -59,7 +59,7 @@ pub fn getcwd() -> Path { let mut buf = [0 as c_char, ..BUF_BYTES]; unsafe { - if libc::getcwd(buf.as_mut_ptr(), 
buf.len() as size_t).is_null() { + if libc::getcwd(buf.as_mut_ptr(), buf.len() as libc::size_t).is_null() { fail!() } Path::new(CString::new(buf.as_ptr(), false)) @@ -350,14 +350,16 @@ pub fn self_exe_name() -> Option { let mib = ~[CTL_KERN as c_int, KERN_PROC as c_int, KERN_PROC_PATHNAME as c_int, -1 as c_int]; - let mut sz: size_t = 0; + let mut sz: libc::size_t = 0; let err = sysctl(mib.as_ptr(), mib.len() as ::libc::c_uint, - ptr::mut_null(), &mut sz, ptr::null(), 0u as size_t); + ptr::mut_null(), &mut sz, ptr::null(), + 0u as libc::size_t); if err != 0 { return None; } if sz == 0 { return None; } let mut v: ~[u8] = vec::with_capacity(sz as uint); let err = sysctl(mib.as_ptr(), mib.len() as ::libc::c_uint, - v.as_mut_ptr() as *mut c_void, &mut sz, ptr::null(), 0u as size_t); + v.as_mut_ptr() as *mut c_void, &mut sz, ptr::null(), + 0u as libc::size_t); if err != 0 { return None; } if sz == 0 { return None; } v.set_len(sz as uint - 1); // chop off trailing NUL @@ -593,12 +595,12 @@ pub fn last_os_error() -> ~str { #[cfg(target_os = "macos")] #[cfg(target_os = "android")] #[cfg(target_os = "freebsd")] - fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) + fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int { #[nolink] extern { - fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) - -> c_int; + fn strerror_r(errnum: c_int, buf: *mut c_char, + buflen: libc::size_t) -> c_int; } unsafe { strerror_r(errnum, buf, buflen) @@ -609,12 +611,13 @@ pub fn last_os_error() -> ~str { // and requires macros to instead use the POSIX compliant variant. 
// So we just use __xpg_strerror_r which is always POSIX compliant #[cfg(target_os = "linux")] - fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: size_t) -> c_int { + fn strerror_r(errnum: c_int, buf: *mut c_char, + buflen: libc::size_t) -> c_int { #[nolink] extern { fn __xpg_strerror_r(errnum: c_int, buf: *mut c_char, - buflen: size_t) + buflen: libc::size_t) -> c_int; } unsafe { @@ -626,7 +629,7 @@ pub fn last_os_error() -> ~str { let p = buf.as_mut_ptr(); unsafe { - if strerror_r(errno() as c_int, p, buf.len() as size_t) < 0 { + if strerror_r(errno() as c_int, p, buf.len() as libc::size_t) < 0 { fail!("strerror_r failure"); } @@ -829,13 +832,14 @@ pub fn page_size() -> uint { } } -/// A memory mapped file or chunk of memory. This is a very system-specific interface to the OS's -/// memory mapping facilities (`mmap` on POSIX, `VirtualAlloc`/`CreateFileMapping` on win32). It -/// makes no attempt at abstracting platform differences, besides in error values returned. Consider +/// A memory mapped file or chunk of memory. This is a very system-specific +/// interface to the OS's memory mapping facilities (`mmap` on POSIX, +/// `VirtualAlloc`/`CreateFileMapping` on win32). It makes no attempt at +/// abstracting platform differences, besides in error values returned. Consider /// yourself warned. /// -/// The memory map is released (unmapped) when the destructor is run, so don't let it leave scope by -/// accident if you want it to stick around. +/// The memory map is released (unmapped) when the destructor is run, so don't +/// let it leave scope by accident if you want it to stick around. pub struct MemoryMap { /// Pointer to the memory created or modified by this map. data: *mut u8, @@ -847,11 +851,12 @@ pub struct MemoryMap { /// Type of memory map pub enum MemoryMapKind { - /// Memory-mapped file. On Windows, the inner pointer is a handle to the mapping, and - /// corresponds to `CreateFileMapping`. Elsewhere, it is null. + /// Memory-mapped file. 
On Windows, the inner pointer is a handle to the + /// mapping, and corresponds to `CreateFileMapping`. Elsewhere, it is null. MapFile(*u8), - /// Virtual memory map. Usually used to change the permissions of a given chunk of memory. - /// Corresponds to `VirtualAlloc` on Windows. + /// Virtual memory map. Usually used to change the permissions of a given + /// chunk of memory, or for allocation. Corresponds to `VirtualAlloc` on + /// Windows. MapVirtual } @@ -863,81 +868,119 @@ pub enum MapOption { MapWritable, /// The memory should be executable MapExecutable, - /// Create a map for a specific address range. Corresponds to `MAP_FIXED` on POSIX. + /// Create a map for a specific address range. Corresponds to `MAP_FIXED` on + /// POSIX. MapAddr(*u8), /// Create a memory mapping for a file with a given fd. MapFd(c_int), - /// When using `MapFd`, the start of the map is `uint` bytes from the start of the file. - MapOffset(uint) + /// When using `MapFd`, the start of the map is `uint` bytes from the start + /// of the file. MapOffset(uint), + /// On POSIX, this can be used to specify the default flags passed to + /// `mmap`. By default it uses `MAP_PRIVATE` and, if not using `MapFd`, + /// `MAP_ANON`. This will override both of those. This is platform-specific + /// (the exact values used) and ignored on Windows. + MapNonStandardFlags(c_int), } /// Possible errors when creating a map. pub enum MapError { /// ## The following are POSIX-specific /// - /// fd was not open for reading or, if using `MapWritable`, was not open for /// writing. ErrFdNotAvail, /// fd was not valid ErrInvalidFd, - /// Either the address given by `MapAddr` or offset given by `MapOffset` was not a multiple of - /// `MemoryMap::granularity` (unaligned to page size). + /// Either the address given by `MapAddr` or offset given by `MapOffset` was + /// not a multiple of `MemoryMap::granularity` (unaligned to page size). 
ErrUnaligned, /// With `MapFd`, the fd does not support mapping. ErrNoMapSupport, - /// If using `MapAddr`, the address + `min_len` was outside of the process's address space. If - /// using `MapFd`, the target of the fd didn't have enough resources to fulfill the request. + /// If using `MapAddr`, the address + `min_len` was outside of the process's + /// address space. If using `MapFd`, the target of the fd didn't have enough + /// resources to fulfill the request. ErrNoMem, + /// A zero-length map was requested. This is invalid according to + /// [POSIX](http://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html). + /// Not all platforms obey this, but this wrapper does. + ErrZeroLength, /// Unrecognized error. The inner value is the unrecognized errno. ErrUnknown(int), /// ## The following are win32-specific /// - /// Unsupported combination of protection flags (`MapReadable`/`MapWritable`/`MapExecutable`). + /// Unsupported combination of protection flags + /// (`MapReadable`/`MapWritable`/`MapExecutable`). ErrUnsupProt, - /// When using `MapFd`, `MapOffset` was given (Windows does not support this at all) + /// When using `MapFd`, `MapOffset` was given (Windows does not support this + /// at all) ErrUnsupOffset, /// When using `MapFd`, there was already a mapping to the file. ErrAlreadyExists, - /// Unrecognized error from `VirtualAlloc`. The inner value is the return value of GetLastError. + /// Unrecognized error from `VirtualAlloc`. The inner value is the return + /// value of GetLastError. ErrVirtualAlloc(uint), - /// Unrecognized error from `CreateFileMapping`. The inner value is the return value of - /// `GetLastError`. + /// Unrecognized error from `CreateFileMapping`. The inner value is the + /// return value of `GetLastError`. ErrCreateFileMappingW(uint), - /// Unrecognized error from `MapViewOfFile`. The inner value is the return value of - /// `GetLastError`. + /// Unrecognized error from `MapViewOfFile`. 
The inner value is the return + /// value of `GetLastError`. ErrMapViewOfFile(uint) } -impl to_str::ToStr for MapError { - fn to_str(&self) -> ~str { - match *self { - ErrFdNotAvail => ~"fd not available for reading or writing", - ErrInvalidFd => ~"Invalid fd", - ErrUnaligned => ~"Unaligned address, invalid flags, \ - negative length or unaligned offset", - ErrNoMapSupport=> ~"File doesn't support mapping", - ErrNoMem => ~"Invalid address, or not enough available memory", - ErrUnknown(code) => format!("Unknown error={}", code), - ErrUnsupProt => ~"Protection mode unsupported", - ErrUnsupOffset => ~"Offset in virtual memory mode is unsupported", - ErrAlreadyExists => ~"File mapping for specified file already exists", - ErrVirtualAlloc(code) => format!("VirtualAlloc failure={}", code), - ErrCreateFileMappingW(code) => format!("CreateFileMappingW failure={}", code), - ErrMapViewOfFile(code) => format!("MapViewOfFile failure={}", code) - } +impl fmt::Default for MapError { + fn fmt(val: &MapError, out: &mut fmt::Formatter) { + let str = match *val { + ErrFdNotAvail => "fd not available for reading or writing", + ErrInvalidFd => "Invalid fd", + ErrUnaligned => { + "Unaligned address, invalid flags, negative length or \ + unaligned offset" + } + ErrNoMapSupport=> "File doesn't support mapping", + ErrNoMem => "Invalid address, or not enough available memory", + ErrUnsupProt => "Protection mode unsupported", + ErrUnsupOffset => "Offset in virtual memory mode is unsupported", + ErrAlreadyExists => "File mapping for specified file already exists", + ErrZeroLength => "Zero-length mapping not allowed", + ErrUnknown(code) => { + write!(out.buf, "Unknown error = {}", code); + return + }, + ErrVirtualAlloc(code) => { + write!(out.buf, "VirtualAlloc failure = {}", code); + return + }, + ErrCreateFileMappingW(code) => { + write!(out.buf, "CreateFileMappingW failure = {}", code); + return + }, + ErrMapViewOfFile(code) => { + write!(out.buf, "MapViewOfFile failure = {}", code); + return + } 
+ }; + write!(out.buf, "{}", str); } } #[cfg(unix)] impl MemoryMap { - /// Create a new mapping with the given `options`, at least `min_len` bytes long. + /// Create a new mapping with the given `options`, at least `min_len` bytes + /// long. `min_len` must be greater than zero; see the note on + /// `ErrZeroLength`. pub fn new(min_len: uint, options: &[MapOption]) -> Result { use libc::off_t; + if min_len == 0 { + return Err(ErrZeroLength) + } let mut addr: *u8 = ptr::null(); let mut prot = 0; let mut flags = libc::MAP_PRIVATE; let mut fd = -1; let mut offset = 0; + let mut custom_flags = false; let len = round_up(min_len, page_size()); for &o in options.iter() { @@ -953,13 +996,15 @@ impl MemoryMap { flags |= libc::MAP_FILE; fd = fd_; }, - MapOffset(offset_) => { offset = offset_ as off_t; } + MapOffset(offset_) => { offset = offset_ as off_t; }, + MapNonStandardFlags(f) => { custom_flags = true; flags = f }, } } - if fd == -1 { flags |= libc::MAP_ANON; } + if fd == -1 && !custom_flags { flags |= libc::MAP_ANON; } let r = unsafe { - libc::mmap(addr as *c_void, len as size_t, prot, flags, fd, offset) + libc::mmap(addr as *c_void, len as libc::size_t, prot, flags, fd, + offset) }; if r.equiv(&libc::MAP_FAILED) { Err(match errno() as c_int { @@ -983,7 +1028,8 @@ impl MemoryMap { } } - /// Granularity that the offset or address must be for `MapOffset` and `MapAddr` respectively. + /// Granularity that the offset or address must be for `MapOffset` and + /// `MapAddr` respectively. pub fn granularity() -> uint { page_size() } @@ -993,6 +1039,8 @@ impl MemoryMap { impl Drop for MemoryMap { /// Unmap the mapping. Fails the task if `munmap` fails. 
fn drop(&mut self) { + if self.len == 0 { /* workaround for dummy_stack */ return; } + unsafe { match libc::munmap(self.data as *c_void, self.len as libc::size_t) { 0 => (), @@ -1027,7 +1075,11 @@ impl MemoryMap { MapExecutable => { executable = true; } MapAddr(addr_) => { lpAddress = addr_ as LPVOID; }, MapFd(fd_) => { fd = fd_; }, - MapOffset(offset_) => { offset = offset_; } + MapOffset(offset_) => { offset = offset_; }, + MapNonStandardFlags(f) => { + info!("MemoryMap::new: MapNonStandardFlags used on \ + Windows: {}", f) + } } } @@ -1113,18 +1165,18 @@ impl MemoryMap { #[cfg(windows)] impl Drop for MemoryMap { - /// Unmap the mapping. Fails the task if any of `VirtualFree`, `UnmapViewOfFile`, or - /// `CloseHandle` fail. + /// Unmap the mapping. Fails the task if any of `VirtualFree`, + /// `UnmapViewOfFile`, or `CloseHandle` fail. fn drop(&mut self) { use libc::types::os::arch::extra::{LPCVOID, HANDLE}; use libc::consts::os::extra::FALSE; + if self.len == 0 { return } unsafe { match self.kind { MapVirtual => { - if libc::VirtualFree(self.data as *mut c_void, - self.len as size_t, - libc::MEM_RELEASE) == FALSE { + if libc::VirtualFree(self.data as *mut c_void, 0, + libc::MEM_RELEASE) == 0 { error!("VirtualFree failed: {}", errno()); } }, @@ -1429,7 +1481,7 @@ mod tests { os::MapWritable ]) { Ok(chunk) => chunk, - Err(msg) => fail!(msg.to_str()) + Err(msg) => fail!("{}", msg) }; assert!(chunk.len >= 16); @@ -1479,7 +1531,7 @@ mod tests { MapOffset(size / 2) ]) { Ok(chunk) => chunk, - Err(msg) => fail!(msg.to_str()) + Err(msg) => fail!("{}", msg) }; assert!(chunk.len > 0); diff --git a/src/libstd/rt/env.rs b/src/libstd/rt/env.rs index f3fa482b18cca..729e377e1af31 100644 --- a/src/libstd/rt/env.rs +++ b/src/libstd/rt/env.rs @@ -10,7 +10,7 @@ //! Runtime environment settings -use from_str::FromStr; +use from_str::from_str; use option::{Some, None}; use os; @@ -18,18 +18,25 @@ use os; // They are expected to be initialized once then left alone. 
static mut MIN_STACK: uint = 2 * 1024 * 1024; +/// This default corresponds to 20M of cache per scheduler (at the default size). +static mut MAX_CACHED_STACKS: uint = 10; static mut DEBUG_BORROW: bool = false; static mut POISON_ON_FREE: bool = false; pub fn init() { unsafe { match os::getenv("RUST_MIN_STACK") { - Some(s) => match FromStr::from_str(s) { + Some(s) => match from_str(s) { Some(i) => MIN_STACK = i, None => () }, None => () } + match os::getenv("RUST_MAX_CACHED_STACKS") { + Some(max) => MAX_CACHED_STACKS = from_str(max).expect("expected positive integer in \ + RUST_MAX_CACHED_STACKS"), + None => () + } match os::getenv("RUST_DEBUG_BORROW") { Some(_) => DEBUG_BORROW = true, None => () @@ -45,6 +52,10 @@ pub fn min_stack() -> uint { unsafe { MIN_STACK } } +pub fn max_cached_stacks() -> uint { + unsafe { MAX_CACHED_STACKS } +} + pub fn debug_borrow() -> bool { unsafe { DEBUG_BORROW } } diff --git a/src/test/bench/silly-test-spawn.rs b/src/test/bench/silly-test-spawn.rs new file mode 100644 index 0000000000000..900796efe33d3 --- /dev/null +++ b/src/test/bench/silly-test-spawn.rs @@ -0,0 +1,16 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Useful smoketest for scheduler performance. +fn main() { + for _ in range(1, 100_000) { + do spawn { } + } +} diff --git a/src/test/bench/spawnone.rs b/src/test/bench/spawnone.rs new file mode 100644 index 0000000000000..75485a7fb9ecb --- /dev/null +++ b/src/test/bench/spawnone.rs @@ -0,0 +1,14 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Useful for checking syscall usage of baseline scheduler usage +fn main() { + do spawn { } +} diff --git a/src/test/bench/task-perf-one-million.rs b/src/test/bench/task-perf-one-million.rs index cdbec1784b98e..e9a9ed194749d 100644 --- a/src/test/bench/task-perf-one-million.rs +++ b/src/test/bench/task-perf-one-million.rs @@ -56,7 +56,7 @@ fn main() { args }; - let children = from_str::(args[1]).get(); + let children = from_str::(args[1]).unwrap(); let (wait_port, wait_chan) = stream(); do task::spawn { calc(children, &wait_chan); @@ -66,5 +66,5 @@ fn main() { let (sum_port, sum_chan) = stream::(); start_chan.send(sum_chan); let sum = sum_port.recv(); - error!("How many tasks? %d tasks.", sum); + error!("How many tasks? {} tasks.", sum); }