
Commit 5d3fd95

rust: thread: Add Thread support
Signed-off-by: Boqun Feng <[email protected]>
1 parent 6f75590 commit 5d3fd95

5 files changed, +318 -0 lines changed


drivers/char/rust_example.rs

Lines changed: 50 additions & 0 deletions
@@ -7,13 +7,16 @@
 #![feature(test)]

 use alloc::boxed::Box;
+use alloc::sync::Arc;
 use core::pin::Pin;
+use core::sync::atomic::{AtomicBool, Ordering};
 use kernel::prelude::*;
 use kernel::{
     chrdev, condvar_init, cstr,
     file_operations::FileOperations,
     miscdev, mutex_init, spinlock_init,
     sync::{CondVar, Mutex, SpinLock},
+    thread::{schedule, Thread},
 };

 module! {
@@ -127,6 +130,53 @@ impl KernelModule for RustExample {
             cv.free_waiters();
         }

+        // Test threads.
+        {
+            let mut a = 1;
+            // FIXME: use a completion or a barrier.
+            let flag = Arc::try_new(AtomicBool::new(false))?;
+            let other = flag.clone();
+
+            let t1 = Thread::try_new(cstr!("rust-thread"), move || {
+                other.store(true, Ordering::Release);
+                let b = Box::try_new(42)?;
+                for _ in 0..20 {
+                    a += 1;
+                    println!("Hello Rust Thread {}", a + b.as_ref());
+                }
+
+                Ok(())
+            })?;
+
+            t1.wake_up();
+
+            // Waits to observe the thread run.
+            while !flag.load(Ordering::Acquire) {
+                schedule();
+            }
+
+            // `t1` should exit normally.
+            t1.stop().expect("Rust thread should exit normally");
+        }
+
+        // Test threads (never woken up to run).
+        {
+            let mut a = 1;
+
+            let t1 = Thread::try_new(cstr!("rust-thread"), move || {
+                let b = Box::try_new(42)?;
+                for _ in 0..20 {
+                    a += 1;
+                    println!("Hello Rust Thread {}", a + b.as_ref());
+                }
+
+                Ok(())
+            })?;
+
+            // Without `wake_up`, `stop` will cause the thread to exit with -EINTR.
+            t1.stop().expect_err("Rust thread should exit abnormally");
+        }
+
         // Including this large variable on the stack will trigger
         // stack probing on the supported archs.
         // This will verify that stack probing does not lead to
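Both test blocks re-declare `let mut a = 1;` because the `move` closures capture `a` by value: `i32` is `Copy`, so each thread increments its own copy and the counter in `init` is untouched. A minimal sketch of that behavior, written as statements inside `init` with a hypothetical thread name, using only the items imported in the diff above:

        // Illustration only; follows the pattern of the tests above.
        let mut a = 1;
        let t = Thread::try_new(cstr!("rust-capture"), move || {
            a += 1; // mutates the closure's own copy of `a`
            Ok(())
        })?;
        t.wake_up();
        // Whatever the spawned thread does, the `a` owned by `init` is still 1 here,
        // because `move` captured a copy of it.
        // As documented on `Thread::stop`, this may be Err(EINTR) if the thread never ran.
        let _ = t.stop();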

rust/helpers.c

Lines changed: 13 additions & 0 deletions
@@ -4,6 +4,7 @@
 #include <linux/build_bug.h>
 #include <linux/uaccess.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/task.h>

 void rust_helper_BUG(void)
 {
@@ -60,6 +61,18 @@ int rust_helper_signal_pending(void)
 }
 EXPORT_SYMBOL(rust_helper_signal_pending);

+void rust_helper_get_task_struct(struct task_struct *task)
+{
+	(void)get_task_struct(task);
+}
+EXPORT_SYMBOL(rust_helper_get_task_struct);
+
+void rust_helper_put_task_struct(struct task_struct *task)
+{
+	put_task_struct(task);
+}
+EXPORT_SYMBOL(rust_helper_put_task_struct);
+
 // See https://github.com/rust-lang/rust-bindgen/issues/1671
 static_assert(__builtin_types_compatible_p(size_t, uintptr_t),
               "size_t must match uintptr_t, what architecture is this??");

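These wrappers are needed because `get_task_struct` and `put_task_struct` are `static inline` helpers in `<linux/sched/task.h>`, so there is no symbol for Rust to call directly; the exported `rust_helper_*` functions provide one. On the Rust side they are declared as in `rust/kernel/thread.rs` below, roughly:

    extern "C" {
        #[allow(improper_ctypes)]
        fn rust_helper_get_task_struct(task: *mut bindings::task_struct);
        #[allow(improper_ctypes)]
        fn rust_helper_put_task_struct(task: *mut bindings::task_struct);
    }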
rust/kernel/bindings_helper.h

Lines changed: 2 additions & 0 deletions
@@ -10,6 +10,8 @@
 #include <linux/version.h>
 #include <linux/miscdevice.h>
 #include <linux/poll.h>
+#include <linux/kthread.h>
+#include <linux/err.h>

 // `bindgen` gets confused at certain things
 const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;

rust/kernel/lib.rs

Lines changed: 1 addition & 0 deletions
@@ -42,6 +42,7 @@ pub mod printk;
 pub mod random;
 mod static_assert;
 pub mod sync;
+pub mod thread;

 #[cfg(CONFIG_SYSCTL)]
 pub mod sysctl;

rust/kernel/thread.rs

Lines changed: 252 additions & 0 deletions
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel thread (kthread).
+//!
+//! This module allows Rust code to create, wake up, and stop a kernel thread.
+
+use crate::c_types;
+use crate::error::{ptr_to_result, Error, KernelResult};
+use crate::{bindings, cstr, CStr};
+
+use alloc::boxed::Box;
+use core::ops::FnOnce;
+
+extern "C" {
+    #[allow(improper_ctypes)]
+    fn rust_helper_get_task_struct(task: *mut bindings::task_struct);
+    #[allow(improper_ctypes)]
+    fn rust_helper_put_task_struct(task: *mut bindings::task_struct);
+}
+
+/// Function passed to `kthread_create_on_node` as the function pointer. No other user.
+#[no_mangle]
+unsafe extern "C" fn rust_thread_func(data: *mut c_types::c_void) -> c_types::c_int {
+    // `Box::from_raw()` to take ownership of the closure.
+    let c = Box::from_raw(data as *mut Box<dyn FnOnce() -> KernelResult<()>>);
+
+    let ret = c();
+
+    match ret {
+        Ok(_) => 0,
+        Err(e) => e.to_kernel_errno(),
+    }
+}
+
+/// A kernel thread handle.
+pub struct Thread {
+    /// Pointer to the kernel thread.
+    task: *mut bindings::task_struct,
+}
+
+impl Thread {
+    /// Creates a new thread using a C-style function pointer.
+    ///
+    /// No extra memory allocation for thread creation beyond `kthread_create_on_node`. Use this
+    /// when the closure allocation overhead is unacceptable or there is already a C-style thread
+    /// function. Otherwise, please consider using [`Thread::try_new`].
+    ///
+    /// # Safety
+    ///
+    /// This function itself is safe, but the caller needs to make sure `f` is a proper function
+    /// pointer, and that `f` uses `arg` correctly.
+    ///
+    /// # Context
+    ///
+    /// This function might sleep due to the memory allocation and waiting for completion in
+    /// `kthread_create_on_node`. Therefore, do not call this in atomic contexts (i.e.
+    /// preemption-off contexts).
+    pub fn try_new_c_style(
+        name: CStr,
+        f: unsafe extern "C" fn(*mut c_types::c_void) -> c_types::c_int,
+        arg: *mut c_types::c_void,
+    ) -> KernelResult<Self> {
+        let task;
+
+        // SAFETY:
+        //
+        // - `kthread_create_on_node` won't use `f` or dereference `arg` itself; if `arg` is an
+        //   invalid pointer or `f` doesn't handle `arg` as it should, the new thread will cause
+        //   unsafe behavior.
+        //
+        // - `kthread_create_on_node` will copy the content of `name`, so we don't need to make
+        //   `name` live longer.
+        unsafe {
+            task = ptr_to_result(bindings::kthread_create_on_node(
+                Some(f),
+                arg,
+                bindings::NUMA_NO_NODE,
+                cstr!("%s").as_ptr() as _,
+                name.as_ptr(),
+            ))?;
+        }
+
+        // Increases the refcount of the task, so that it won't go away if it `do_exit`s.
+        // SAFETY: `task` is a proper pointer pointing to a newly created thread.
+        unsafe {
+            rust_helper_get_task_struct(task);
+        }
+
+        Ok(Thread { task })
+    }
+
+    /// Creates a new thread.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use kernel::thread::Thread;
+    /// use alloc::boxed::Box;
+    ///
+    /// let mut a = 1;
+    ///
+    /// let t = Thread::try_new(
+    ///     cstr!("rust-thread"),
+    ///     move || {
+    ///         let b = Box::try_new(42)?;
+    ///
+    ///         for _ in 0..10 {
+    ///             a = a + 1;
+    ///             println!("Hello Rust Thread {}", a + b.as_ref());
+    ///         }
+    ///         Ok(())
+    ///     },
+    /// )?;
+    ///
+    /// t.wake_up();
+    /// ```
+    ///
+    /// # Context
+    ///
+    /// This function might sleep due to the memory allocation and waiting for completion in
+    /// `kthread_create_on_node`. Therefore, do not call this in atomic contexts (i.e.
+    /// preemption-off contexts).
+    pub fn try_new<F>(name: CStr, f: F) -> KernelResult<Self>
+    where
+        F: FnOnce() -> KernelResult<()>,
+        F: Send + 'static,
+    {
+        // Allocate the closure here, because this function may return before `rust_thread_func`
+        // (the function that uses the closure) gets executed.
+        let boxed_fn: Box<dyn FnOnce() -> KernelResult<()> + 'static> = Box::try_new(f)?;
+
+        // Double boxing here because `dyn FnOnce` is a fat pointer, and we can only pass a thin
+        // pointer as the `data` for `kthread_create_on_node`.
+        //
+        // We `into_raw` on this side, and will `from_raw` on the other side to transfer the
+        // ownership of the boxed data.
+        let double_box_ptr = Box::into_raw(Box::try_new(boxed_fn)?) as *mut _;
+
+        // SAFETY:
+        //
+        // `try_new_c_style` is safe, but we need to make sure the newly created thread runs
+        // safely:
+        //
+        // - `double_box_ptr` is a proper pointer (generated by `Box::into_raw()`), and on success,
+        //   the new thread will get the ownership.
+        //
+        // - `rust_thread_func` is provided by us and handles the dereference of
+        //   `double_box_ptr`.
+        let result = Self::try_new_c_style(name, rust_thread_func, double_box_ptr);
+
+        if let Err(e) = result {
+            // Creation failed, so we need to take back the double-boxed closure.
+            //
+            // SAFETY:
+            //
+            // `double_box_ptr` is a proper pointer generated by a `Box::into_raw()` from a box
+            // created by us; if the thread creation fails, no one else will consume that pointer.
+            unsafe {
+                Box::from_raw(double_box_ptr);
+            }
+
+            Err(e)
+        } else {
+            result
+        }
+    }
+
+    /// Wakes up the thread.
+    ///
+    /// Note that a newly created thread (e.g. via [`Thread::try_new`]) will not run until
+    /// [`Thread::wake_up`] is called.
+    ///
+    /// # Context
+    ///
+    /// This function might sleep, don't call it in atomic contexts.
+    pub fn wake_up(&self) {
+        // SAFETY:
+        //
+        // `task` is a valid pointer to a kernel thread structure, the refcount of which is
+        // increased in `try_new*`, so it won't point to a freed `task_struct`. And it's not
+        // stopped, because `stop` consumes the [`Thread`].
+        unsafe {
+            bindings::wake_up_process(self.task);
+        }
+    }
+
+    /// Stops the thread.
+    ///
+    /// - If the thread hasn't been woken up after creation, the thread closure won't be called,
+    ///   and this will return `EINTR`. Note that a thread may not be woken up even after
+    ///   [`Thread::wake_up`] is called.
+    ///
+    /// - Otherwise, this waits for the closure to return or for the thread to `do_exit` itself.
+    ///
+    /// Consumes the [`Thread`] so that it's no longer accessible. Returns the result of the
+    /// thread closure (or the exit code in [`KernelResult`] format).
+    ///
+    /// # Context
+    ///
+    /// This function might sleep, don't call it in atomic contexts.
+    pub fn stop(self) -> KernelResult<()> {
+        let ret;
+        // SAFETY:
+        //
+        // `task` is a valid pointer to a kernel thread structure, the refcount of which is
+        // increased in `try_new*`, so it won't point to a freed `task_struct`. And it's not
+        // stopped, because `stop` consumes the [`Thread`].
+        unsafe { ret = bindings::kthread_stop(self.task) }
+
+        if ret == 0 {
+            Ok(())
+        } else {
+            Err(Error::from_kernel_errno(ret))
+        }
+    }
+}
+
+impl Drop for Thread {
+    fn drop(&mut self) {
+        // Decreases the refcount of the thread; the thread may still be running after we `drop`
+        // the `Thread`.
+        //
+        // SAFETY:
+        //
+        // At least one refcount is held by `Thread::try_new*`, and the refcount of `task_struct`
+        // is implemented with atomics.
+        unsafe {
+            rust_helper_put_task_struct(self.task);
+        }
+    }
+}
+
+/// Tries to give up the CPU and let another thread run.
+///
+/// This maps to the kernel's `schedule` function, which is similar to [`std::thread::yield_now`].
+///
+/// # Context
+///
+/// This function might sleep, don't call it in atomic contexts.
+///
+/// [`std::thread::yield_now`]: https://doc.rust-lang.org/std/thread/fn.yield_now.html
+pub fn schedule() {
+    // SAFETY:
+    //
+    // If we can be scheduled back from another thread, then this can be treated as a no-op. A
+    // special case is a thread that sets its state to `TASK_DEAD`, after which `schedule` never
+    // returns. Currently we don't have a way to do this safely in Rust, and in the future we
+    // probably still won't allow it.
+    unsafe {
+        bindings::schedule();
+    }
+}
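The in-tree example only exercises `Thread::try_new`; below is a minimal sketch of the `try_new_c_style` path. The imports assume the crate layout shown in this commit (`kernel::c_types`, the `cstr!` macro, `KernelResult`); `noop_thread_fn` and the thread name are made up for illustration. Since the function ignores its `arg`, passing a null pointer satisfies the documented safety requirement.

    // Sketch only: exact import paths may differ from the crate layout at this commit.
    use kernel::prelude::*;
    use kernel::{c_types, cstr, thread::Thread};

    // Hypothetical C-style thread function; it ignores `arg`, so a null `arg` is fine.
    unsafe extern "C" fn noop_thread_fn(_arg: *mut c_types::c_void) -> c_types::c_int {
        0
    }

    fn spawn_noop() -> KernelResult<()> {
        let t = Thread::try_new_c_style(cstr!("rust-noop"), noop_thread_fn, core::ptr::null_mut())?;
        t.wake_up();
        // As documented on `Thread::stop`, this may be Err(EINTR) if the thread never got to run.
        let _ = t.stop();
        Ok(())
    }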
