Fix context switching.

Previously there was a triple fault, due to a combination of reasons
(e.g. rsp and rbp being ordered differently in the struct and in the assembly).

Now, the locks will be held __all the way until the new context__ has
been switched to, which completely eliminates any possibility that the
"pcid fault" originates here.

While I am unsure whether this will work, this could also be an
opportunity to remove CONTEXT_SWITCH_LOCK entirely.
This commit is contained in:
4lDO2
2021-02-07 11:59:46 +01:00
parent ef4270e473
commit 47c3b2269f
3 changed files with 64 additions and 27 deletions

View File

@@ -1,7 +1,5 @@
use core::mem;
use core::sync::atomic::{AtomicU8, Ordering};
use alloc::sync::Arc;
use core::sync::atomic::AtomicBool;
use crate::syscall::FloatRegisters;
@@ -9,7 +7,7 @@ use crate::syscall::FloatRegisters;
/// Compare and exchange this to true when beginning a context switch on any CPU
/// The `Context::switch_to` function will set it back to false, allowing other CPUs to switch
/// This must be done, as no locks can be held on the stack during switch
pub static CONTEXT_SWITCH_LOCK: AtomicU8 = AtomicU8::new(AbiCompatBool::False as u8);
pub static CONTEXT_SWITCH_LOCK: AtomicBool = AtomicBool::new(false);
const ST_RESERVED: u128 = 0xFFFF_FFFF_FFFF_0000_0000_0000_0000_0000;
@@ -151,8 +149,12 @@ pub unsafe extern "C" fn switch_to(_prev: &mut Context, _next: &mut Context) {
// - we can modify scratch registers, e.g. rax
// - we cannot change callee-preserved registers arbitrarily, e.g. rbx, which is why we
// store them here in the first place.
"mov rax, [rdi + 0x00] // load `prev.fx`
fxsave64 [rax] // save processor simd state in `prev.fx`
"
// load `prev.fx`
mov rax, [rdi + 0x00]
// save processor SSE/FPU/AVX state in `prev.fx` pointee
fxsave64 [rax]
// set `prev.loadable` to true
mov BYTE PTR [rdi + 0x50], {true}
@@ -168,6 +170,7 @@ pub unsafe extern "C" fn switch_to(_prev: &mut Context, _next: &mut Context) {
fxrstor64 [rax]
switch_to.after_fx:
// Save the current CR3, and load the next CR3 if not identical
mov rcx, cr3
mov [rdi + 0x08], rcx
mov rax, [rsi + 0x08]
@@ -177,6 +180,7 @@ pub unsafe extern "C" fn switch_to(_prev: &mut Context, _next: &mut Context) {
mov cr3, rax
switch_to.same_cr3:
// Save old registers, and load new ones
mov [rdi + 0x18], rbx
mov rbx, [rsi + 0x18]
@@ -192,24 +196,32 @@ pub unsafe extern "C" fn switch_to(_prev: &mut Context, _next: &mut Context) {
mov [rdi + 0x38], r15
mov r15, [rsi + 0x38]
mov [rdi + 0x40], rsp
mov rsp, [rsi + 0x40]
mov [rdi + 0x40], rbp
mov rbp, [rsi + 0x40]
mov [rdi + 0x48], rbp
mov rbp, [rsi + 0x48]
mov [rdi + 0x48], rsp
mov rsp, [rsi + 0x48]
// push RFLAGS (can only be modified via stack)
pushfq
// pop RFLAGS into `self.rflags`
pop QWORD PTR [rdi + 0x10]
// push `next.rflags`
push QWORD PTR [rsi + 0x10]
// pop into RFLAGS
popfq
// Unset global lock after loading registers but before switch
xor eax, eax
xchg BYTE PTR [rip+{switch_lock}], al",
// When we return, we cannot even guarantee that the return address on the stack points to
// the calling function, `context::switch`. Thus, we have to execute this Rust hook
// ourselves, which will unlock the contexts before any later switch.
call {switch_hook}
",
true = const(AbiCompatBool::True as u8),
switch_lock = sym CONTEXT_SWITCH_LOCK,
switch_hook = sym crate::context::switch_finish_hook,
);
}

View File

@@ -34,6 +34,8 @@ pub mod signal;
/// Timeout handling
pub mod timeout;
pub use self::switch::switch_finish_hook;
/// Limit on number of contexts: one less than `isize::MAX` cast to `usize`
/// (presumably so context ids/indices always fit in an `isize` — TODO confirm
/// against how context ids are allocated)
pub const CONTEXT_MAX_CONTEXTS: usize = (isize::max_value() as usize) - 1;

View File

@@ -1,8 +1,11 @@
use core::cell::Cell;
use core::ops::Bound;
use core::sync::atomic::Ordering;
use alloc::sync::Arc;
use spin::RwLock;
use crate::context::signal::signal_handler;
use crate::context::{arch, contexts, Context, Status, CONTEXT_ID};
use crate::gdt;
@@ -64,6 +67,24 @@ unsafe fn update(context: &mut Context, cpu_id: usize) {
}
}
/// The pair of context locks held across a context switch.
///
/// `switch` stores this in `SWITCH_RESULT` before calling `arch::switch_to`,
/// and `switch_finish_hook` takes it back out and force-unlocks both locks —
/// the locks cannot be released by guards on the stack, because the stack
/// itself is replaced during the switch.
struct SwitchResult {
    // Lock of the context being switched away from.
    prev_lock: Arc<RwLock<Context>>,
    // Lock of the context being switched to.
    next_lock: Arc<RwLock<Context>>,
}
pub unsafe extern "C" fn switch_finish_hook() {
if let Some(SwitchResult { prev_lock, next_lock }) = SWITCH_RESULT.take() {
prev_lock.force_write_unlock();
next_lock.force_write_unlock();
} else {
panic!("SWITCH_RESULT was not set");
}
arch::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
}
/// Hand-off slot carrying the locks of an in-progress switch from `switch`
/// to `switch_finish_hook`.
// NOTE(review): `#[thread_local]` — presumably per-CPU in this kernel, so
// concurrent switches on different CPUs do not clobber each other; confirm.
#[thread_local]
static SWITCH_RESULT: Cell<Option<SwitchResult>> = Cell::new(None);
/// Whether `context` may be switched to on the CPU identified by `cpu_id`.
unsafe fn runnable(context: &Context, cpu_id: usize) -> bool {
    // Switch to context only if it is runnable, is not currently running,
    // is not stopped by ptrace, and is owned by the current CPU
    !context.running && !context.ptrace_stop && context.status == Status::Runnable && context.cpu_id == Some(cpu_id)
@@ -79,7 +100,7 @@ pub unsafe fn switch() -> bool {
let ticks = PIT_TICKS.swap(0, Ordering::SeqCst);
// Set the global lock to avoid the unsafe operations below from causing issues
while arch::CONTEXT_SWITCH_LOCK.compare_and_swap(0_u8, 1_u8, Ordering::SeqCst) == 0_u8 {
while arch::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
interrupt::pause();
}
@@ -164,26 +185,28 @@ pub unsafe fn switch() -> bool {
}
let from_arch_ptr: *mut arch::Context = &mut from_context_guard.arch;
let to_arch: &mut arch::Context = &mut to_context.arch;
core::mem::forget(from_context_guard);
/*
let mut from_context_lock = Arc::clone(&from_context_lock);
let mut to_context_lock = Arc::clone(&to_context_lock);
*/
let prev_arch: &mut arch::Context = &mut *from_arch_ptr;
let next_arch: &mut arch::Context = &mut to_context.arch;
arch::switch_to(&mut *from_arch_ptr, to_arch);
// to_context_guard only exists as a raw pointer, but is still locked
/*
to_context_lock.force_write_unlock();
from_context_lock.force_write_unlock();
*/
SWITCH_RESULT.set(Some(SwitchResult {
prev_lock: from_context_lock,
next_lock: to_context_lock,
}));
arch::switch_to(prev_arch, next_arch);
// NOTE: After switch_to is called, the return address can even be different from the
// current return address, meaning that we cannot use local variables here, and that we
// need to use the `switch_finish_hook` to be able to release the locks.
true
} else {
// No target was found, unset global lock and return
arch::CONTEXT_SWITCH_LOCK.store(0_u8, Ordering::SeqCst);
arch::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
false
}