rust: thread: Add Thread support
Signed-off-by: Boqun Feng <[email protected]>
fbq committed Mar 19, 2021
1 parent 6f75590 commit 4da9cb7
Showing 5 changed files with 319 additions and 0 deletions.
50 changes: 50 additions & 0 deletions drivers/char/rust_example.rs
@@ -7,13 +7,16 @@
#![feature(test)]

use alloc::boxed::Box;
use alloc::sync::Arc;
use core::pin::Pin;
use core::sync::atomic::{AtomicBool, Ordering};
use kernel::prelude::*;
use kernel::{
chrdev, condvar_init, cstr,
file_operations::FileOperations,
miscdev, mutex_init, spinlock_init,
sync::{CondVar, Mutex, SpinLock},
thread::{schedule, Thread},
};

module! {
@@ -127,6 +130,53 @@ impl KernelModule for RustExample {
cv.free_waiters();
}

// Test threads.
{
let mut a = 1;
// FIXME: use a completion or a barrier.
let flag = Arc::try_new(AtomicBool::new(false))?;
let other = flag.clone();

let t1 = Thread::try_new(cstr!("rust-thread"), move || {
other.store(true, Ordering::Release);
let b = Box::try_new(42)?;
for _ in 0..20 {
a += 1;
println!("Hello Rust Thread {}", a + b.as_ref());
}

Ok(())
})?;

t1.wake_up();

// Waits to observe the thread run.
while !flag.load(Ordering::Acquire) {
schedule();
}

// `t1` should exit normally.
t1.stop().expect("Rust thread should exit normally");
}

// Test threads (never woken up before being stopped).
{
let mut a = 1;

let t1 = Thread::try_new(cstr!("rust-thread"), move || {
let b = Box::try_new(42)?;
for _ in 0..20 {
a += 1;
println!("Hello Rust Thread {}", a + b.as_ref());
}

Ok(())
})?;

// Without `wake_up`, `stop` will cause the thread to exit with -EINTR.
t1.stop().expect_err("Rust thread should exit abnormally");
}

// Including this large variable on the stack will trigger
// stack probing on the supported archs.
// This will verify that stack probing does not lead to
13 changes: 13 additions & 0 deletions rust/helpers.c
@@ -4,6 +4,7 @@
#include <linux/build_bug.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>

void rust_helper_BUG(void)
{
@@ -60,6 +61,18 @@ int rust_helper_signal_pending(void)
}
EXPORT_SYMBOL(rust_helper_signal_pending);

void rust_helper_get_task_struct(struct task_struct *task)
{
(void)get_task_struct(task);
}
EXPORT_SYMBOL(rust_helper_get_task_struct);

void rust_helper_put_task_struct(struct task_struct *task)
{
put_task_struct(task);
}
EXPORT_SYMBOL(rust_helper_put_task_struct);

// See https://github.com/rust-lang/rust-bindgen/issues/1671
static_assert(__builtin_types_compatible_p(size_t, uintptr_t),
"size_t must match uintptr_t, what architecture is this??");
2 changes: 2 additions & 0 deletions rust/kernel/bindings_helper.h
@@ -10,6 +10,8 @@
#include <linux/version.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/err.h>

// `bindgen` gets confused at certain things
const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
1 change: 1 addition & 0 deletions rust/kernel/lib.rs
@@ -42,6 +42,7 @@ pub mod printk;
pub mod random;
mod static_assert;
pub mod sync;
pub mod thread;

#[cfg(CONFIG_SYSCTL)]
pub mod sysctl;
253 changes: 253 additions & 0 deletions rust/kernel/thread.rs
@@ -0,0 +1,253 @@
// SPDX-License-Identifier: GPL-2.0

//! A kernel thread (kthread).
//!
//! This module allows Rust code to create, wake up, and stop a kernel thread.

use crate::c_types;
use crate::error::{ptr_to_result, Error, KernelResult};
use crate::{bindings, cstr, CStr};

use alloc::boxed::Box;
use core::ops::FnOnce;

extern "C" {
#[allow(improper_ctypes)]
fn rust_helper_get_task_struct(task: *mut bindings::task_struct);
#[allow(improper_ctypes)]
fn rust_helper_put_task_struct(task: *mut bindings::task_struct);
}

/// Function passed to `kthread_create_on_node` as the function pointer. It has no other users.
#[no_mangle]
unsafe extern "C" fn rust_thread_func(data: *mut c_types::c_void) -> c_types::c_int {
// `Box::from_raw()` to take ownership of the closure.
let c = Box::from_raw(data as *mut Box<dyn FnOnce() -> KernelResult<()>>);

let ret = c();

match ret {
Ok(_) => 0,
Err(e) => e.to_kernel_errno(),
}
}

/// A kernel thread handle
pub struct Thread {
/// Pointer to kernel thread
task: *mut bindings::task_struct,
}

impl Thread {
/// Creates a new thread using a C-style function pointer.
///
/// This performs no extra memory allocation for thread creation beyond what
/// `kthread_create_on_node` itself does. Use it when the closure allocation overhead is
/// unacceptable or there is already a C-style thread function. Otherwise, please consider
/// using [`Thread::try_new`].
///
/// # Safety
///
/// This function itself doesn't dereference `arg` or call `f`, so even if users pass incorrect
/// parameters, this function won't run into trouble. However, if users provide an incorrect
/// `arg` or `f`, the new thread may corrupt memory or perform other unsound operations, which
/// is what makes this function `unsafe`.
///
/// The safety requirements of calling this function are:
///
/// - Make sure `arg` is a proper pointer that points to a valid memory location when the new
/// thread begins to run.
///
/// - Make sure `f` is a proper function pointer and `f` handles `arg` correctly.
///
/// # Context
///
/// This function might sleep due to the memory allocation and the wait for completion in
/// `kthread_create_on_node`. Therefore, do not call this in atomic contexts (i.e.
/// preemption-off contexts).
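///
/// # Examples
///
/// A minimal sketch of how this could be called (assuming `kernel::c_types` is accessible to
/// the caller); `my_thread_func` is a hypothetical C-style thread function used only for
/// illustration.
///
/// ```
/// use core::ptr;
/// use kernel::c_types;
/// use kernel::thread::Thread;
///
/// unsafe extern "C" fn my_thread_func(_arg: *mut c_types::c_void) -> c_types::c_int {
/// // This function ignores its argument, so a null `arg` is fine.
/// 0
/// }
///
/// // SAFETY: `my_thread_func` is a proper function pointer and never dereferences `arg`.
/// let t = unsafe {
/// Thread::try_new_c_style(cstr!("rust-thread"), my_thread_func, ptr::null_mut())
/// }?;
///
/// t.wake_up();
/// ```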
pub unsafe fn try_new_c_style(
name: CStr,
f: unsafe extern "C" fn(*mut c_types::c_void) -> c_types::c_int,
arg: *mut c_types::c_void,
) -> KernelResult<Self> {
let task;

// SAFETY:
//
// - `kthread_create_on_node` will copy the content of `name`, so we don't need to keep
// `name` alive after this call.
task = ptr_to_result(bindings::kthread_create_on_node(
Some(f),
arg,
bindings::NUMA_NO_NODE,
cstr!("%s").as_ptr() as _,
name.as_ptr(),
))?;

// Increases the refcount of the task, so that it won't go away if it calls `do_exit`.
// SAFETY: `task` is a proper pointer pointing to a newly created thread.
rust_helper_get_task_struct(task);

Ok(Thread { task })
}

/// Creates a new thread.
///
/// # Examples
///
/// ```
/// use kernel::thread::Thread;
/// use alloc::boxed::Box;
///
/// let mut a = 1;
///
/// let t = Thread::try_new(
/// cstr!("rust-thread"),
/// move || {
/// let b = Box::try_new(42)?;
///
/// for _ in 0..10 {
/// a = a + 1;
/// println!("Hello Rust Thread {}", a + b.as_ref());
/// }
/// Ok(())
/// },
/// )?;
///
/// t.wake_up();
/// ```
///
/// # Context
///
/// This function might sleep due to the memory allocation and the wait for completion in
/// `kthread_create_on_node`. Therefore, do not call this in atomic contexts (i.e.
/// preemption-off contexts).
pub fn try_new<F>(name: CStr, f: F) -> KernelResult<Self>
where
F: FnOnce() -> KernelResult<()>,
F: Send + 'static,
{
// Allocate the closure here, because this function may return before `rust_thread_func`
// (the function that uses the closure) gets executed.
let boxed_fn: Box<dyn FnOnce() -> KernelResult<()> + 'static> = Box::try_new(f)?;

// Double boxing here because `dyn FnOnce` is a fat pointer, and we can only pass a thin,
// pointer-sized value as the `data` for `kthread_create_on_node`.
//
// We `into_raw` from this side, and will `from_raw` at the other side to transfer the
// ownership of the boxed data.
let double_box_ptr = Box::into_raw(Box::try_new(boxed_fn)?) as *mut _;

let result;
// SAFETY:
//
// - `double_box_ptr` is a proper pointer (generated by `Box::into_raw`), and if creation
// succeeds, the new thread will take ownership of it.
//
// - `rust_thread_func` is provided by us and handles the dereference of
// `double_box_ptr` (via `Box::from_raw`).
unsafe {
result = Self::try_new_c_style(name, rust_thread_func, double_box_ptr);
}

if let Err(e) = result {
// Creation failed, so we need to take back the double-boxed closure.
//
// SAFETY:
//
// `double_box_ptr` is a proper pointer generated by `Box::into_raw()` from a box
// created by us; since the thread creation failed, no one else will consume that pointer.
unsafe {
Box::from_raw(double_box_ptr);
}

Err(e)
} else {
result
}
}

/// Wakes up the thread.
///
/// Note that a newly created thread (e.g. via [`Thread::try_new`]) will not run until a
/// [`Thread::wake_up`] is called.
///
/// # Context
///
/// This function might sleep; don't call it in atomic contexts.
pub fn wake_up(&self) {
// SAFETY:
//
// `task` is a valid pointer to a kernel thread structure, the refcount of which is
// increased in `try_new*`, so it won't point to a freed `task_struct`. And it's not
// stopped because `stop` will consume the [`Thread`].
unsafe {
bindings::wake_up_process(self.task);
}
}

/// Stops the thread.
///
/// - If the thread hasn't been woken up after creation, the thread closure won't be called,
/// and `stop` will return `EINTR`. Note that a thread may not be woken up even after
/// [`Thread::wake_up`] is called.
///
/// - Otherwise, waits for the closure to return or for the thread to call `do_exit` itself.
///
/// Consumes the [`Thread`] so that it's no longer accessible. Returns the result of the thread
/// closure (or the exit code wrapped in a [`KernelResult`]).
///
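/// # Examples
///
/// A minimal sketch mirroring the test in `rust_example.rs`: stopping a thread that was never
/// woken up makes `stop` return an error (`EINTR`), because the closure never ran.
///
/// ```
/// use kernel::thread::Thread;
///
/// let t = Thread::try_new(cstr!("rust-thread"), move || Ok(()))?;
///
/// // The thread was never woken up, so the closure doesn't run and `stop` returns `Err`.
/// t.stop().expect_err("Rust thread should exit abnormally");
/// ```
///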
/// # Context
///
/// This function might sleep; don't call it in atomic contexts.
pub fn stop(self) -> KernelResult<()> {
let ret;
// SAFETY:
//
// `task` is a valid pointer to a kernel thread structure, the refcount of which is
// increased in `try_new*`, so it won't point to a freed `task_struct`. And it's not
// stopped because `stop` will consume the [`Thread`].
unsafe { ret = bindings::kthread_stop(self.task) }

if ret == 0 {
Ok(())
} else {
Err(Error::from_kernel_errno(ret))
}
}
}

impl Drop for Thread {
fn drop(&mut self) {
// Decreases the refcount of the thread; the thread may still be running after we `drop`
// the `Thread`.
//
// SAFETY:
//
// At least one refcount is held by `Thread::try_new*`, and the refcount of `task_struct` is
// implemented with atomics.
unsafe {
rust_helper_put_task_struct(self.task);
}
}
}

/// Tries to give up the CPU and let another thread run.
///
/// This maps to the kernel's `schedule` function, which is similar to [`std::thread::yield_now`].
///
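/// # Examples
///
/// A minimal sketch; `rust_example.rs` uses this in a loop to spin until another thread sets a
/// shared flag.
///
/// ```
/// use kernel::thread::schedule;
///
/// // Voluntarily give up the CPU so that other threads may run.
/// schedule();
/// ```
///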
/// # Context
///
/// This function might sleep; don't call it in atomic contexts.
///
/// [`std::thread::yield_now`]: https://doc.rust-lang.org/std/thread/fn.yield_now.html
pub fn schedule() {
// SAFETY:
//
// If we can be scheduled back from another thread, then this call can be treated as a no-op.
// A special case is a thread that sets its state to `TASK_DEAD` before calling `schedule`,
// in which case `schedule` will never return. Currently we don't have a way to do that safely
// in Rust, and in the future we probably still won't allow it.
unsafe {
bindings::schedule();
}
}
