diff --git a/src/arch/aarch64/device/rtc.rs b/src/arch/aarch64/device/rtc.rs index 178afcf..d7aaa9a 100644 --- a/src/arch/aarch64/device/rtc.rs +++ b/src/arch/aarch64/device/rtc.rs @@ -19,7 +19,7 @@ static mut PL031_RTC: Pl031rtc = Pl031rtc { pub unsafe fn init() { PL031_RTC.init(); - time::START.lock().0 = PL031_RTC.time(); + *time::START.lock() = (PL031_RTC.time() as u128) * time::NANOS_PER_SEC; } struct Pl031rtc { diff --git a/src/arch/aarch64/interrupt/irq.rs b/src/arch/aarch64/interrupt/irq.rs index 7501836..a21bf47 100644 --- a/src/arch/aarch64/interrupt/irq.rs +++ b/src/arch/aarch64/interrupt/irq.rs @@ -51,10 +51,7 @@ pub unsafe fn irq_handler_com1(irq: u32) { pub unsafe fn irq_handler_gentimer(irq: u32) { GENTIMER.clear_irq(); { - let mut offset = time::OFFSET.lock(); - let sum = offset.1 + GENTIMER.clk_freq as u64; - offset.1 = sum % 1_000_000_000; - offset.0 += sum / 1_000_000_000; + *time::OFFSET.lock() += GENTIMER.clk_freq as u128; } timeout::trigger(); diff --git a/src/arch/x86/device/pit.rs b/src/arch/x86/device/pit.rs index a39cd00..7c96be1 100644 --- a/src/arch/x86/device/pit.rs +++ b/src/arch/x86/device/pit.rs @@ -15,3 +15,10 @@ pub unsafe fn init() { CHAN0.write((CHAN0_DIVISOR & 0xFF) as u8); CHAN0.write((CHAN0_DIVISOR >> 8) as u8); } + +pub unsafe fn read() -> u16 { + COMMAND.write(SELECT_CHAN0 | 0); + let low = CHAN0.read(); + let high = CHAN0.read(); + ((high as u16) << 8) | (low as u16) +} diff --git a/src/arch/x86/device/rtc.rs b/src/arch/x86/device/rtc.rs index 6c20f67..d68cdbb 100644 --- a/src/arch/x86/device/rtc.rs +++ b/src/arch/x86/device/rtc.rs @@ -3,7 +3,7 @@ use crate::time; pub fn init() { let mut rtc = Rtc::new(); - time::START.lock().0 = rtc.time(); + *time::START.lock() = (rtc.time() as u128) * time::NANOS_PER_SEC; } fn cvt_bcd(value: usize) -> usize { diff --git a/src/arch/x86/interrupt/irq.rs b/src/arch/x86/interrupt/irq.rs index 3704557..96845fb 100644 --- a/src/arch/x86/interrupt/irq.rs +++ b/src/arch/x86/interrupt/irq.rs @@ 
-11,6 +11,8 @@ use crate::scheme::debug::{debug_input, debug_notify}; use crate::scheme::serio::serio_input; use crate::{context, time}; +pub const PIT_RATE: u128 = 2_250_286; + //resets to 0 in context::switch() #[thread_local] pub static PIT_TICKS: AtomicUsize = AtomicUsize::new(0); @@ -137,13 +139,8 @@ unsafe fn ioapic_unmask(irq: usize) { interrupt_stack!(pit_stack, |_stack| { // Saves CPU time by not sending IRQ event irq_trigger(0); - const PIT_RATE: u64 = 2_250_286; - { - let mut offset = time::OFFSET.lock(); - let sum = offset.1 + PIT_RATE; - offset.1 = sum % 1_000_000_000; - offset.0 += sum / 1_000_000_000; + *time::OFFSET.lock() += PIT_RATE; } eoi(0); @@ -270,13 +267,8 @@ interrupt!(lapic_error, || { }); interrupt!(calib_pit, || { - const PIT_RATE: u64 = 2_250_286; - { - let mut offset = time::OFFSET.lock(); - let sum = offset.1 + PIT_RATE; - offset.1 = sum % 1_000_000_000; - offset.0 += sum / 1_000_000_000; + *time::OFFSET.lock() += PIT_RATE; } eoi(0); diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs index 13ba989..31a74e2 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -160,7 +160,7 @@ pub unsafe fn init( let flush_all = map_percpu(cpu_id, KernelMapper::lock_manually(cpu_id).get_mut().expect("expected KernelMapper not to be locked re-entrant in paging::init")); flush_all.flush(); - return init_tcb(cpu_id); + init_tcb(cpu_id) } pub unsafe fn init_ap( diff --git a/src/arch/x86/start.rs b/src/arch/x86/start.rs index 859a758..77a437f 100644 --- a/src/arch/x86/start.rs +++ b/src/arch/x86/start.rs @@ -96,9 +96,9 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! 
{ // Initialize logger log::init_logger(|r| { use core::fmt::Write; - let _ = write!( + let _ = writeln!( super::debug::Writer::new(), - "{}:{} -- {}\n", + "{}:{} -- {}", r.target(), r.level(), r.args() diff --git a/src/arch/x86/stop.rs b/src/arch/x86/stop.rs index 0b12b8b..38aaef8 100644 --- a/src/arch/x86/stop.rs +++ b/src/arch/x86/stop.rs @@ -40,7 +40,7 @@ fn userspace_acpi_shutdown() { return; } log::info!("Waiting one second for ACPI driver to run the shutdown sequence."); - let (initial_s, initial_ns) = time::monotonic(); + let initial = time::monotonic(); // Since this driver is a userspace process, and we do not use any magic like directly // context switching, we have to wait for the userspace driver to complete, with a timeout. @@ -51,13 +51,9 @@ fn userspace_acpi_shutdown() { // event flag like EVENT_DIRECT, which has already been suggested for IRQs. // TODO: Waitpid with timeout? Because, what if the ACPI driver would crash? let _ = unsafe { context::switch() }; - let (current_s, current_ns) = time::monotonic(); - let diff_s = current_s - initial_s; - let diff_part_ns = current_ns - initial_ns; - let diff_ns = diff_s * 1_000_000_000 + diff_part_ns; - - if diff_ns > 1_000_000_000 { + let current = time::monotonic(); + if current - initial > time::NANOS_PER_SEC { log::info!("Timeout reached, thus falling back to other shutdown methods."); return; } diff --git a/src/arch/x86_64/device/pit.rs b/src/arch/x86_64/device/pit.rs index a39cd00..7c96be1 100644 --- a/src/arch/x86_64/device/pit.rs +++ b/src/arch/x86_64/device/pit.rs @@ -15,3 +15,10 @@ pub unsafe fn init() { CHAN0.write((CHAN0_DIVISOR & 0xFF) as u8); CHAN0.write((CHAN0_DIVISOR >> 8) as u8); } + +pub unsafe fn read() -> u16 { + COMMAND.write(SELECT_CHAN0 | 0); + let low = CHAN0.read(); + let high = CHAN0.read(); + ((high as u16) << 8) | (low as u16) +} diff --git a/src/arch/x86_64/device/rtc.rs b/src/arch/x86_64/device/rtc.rs index 6c20f67..d68cdbb 100644 --- a/src/arch/x86_64/device/rtc.rs +++ 
b/src/arch/x86_64/device/rtc.rs @@ -3,7 +3,7 @@ use crate::time; pub fn init() { let mut rtc = Rtc::new(); - time::START.lock().0 = rtc.time(); + *time::START.lock() = (rtc.time() as u128) * time::NANOS_PER_SEC; } fn cvt_bcd(value: usize) -> usize { diff --git a/src/arch/x86_64/interrupt/irq.rs b/src/arch/x86_64/interrupt/irq.rs index 3704557..96845fb 100644 --- a/src/arch/x86_64/interrupt/irq.rs +++ b/src/arch/x86_64/interrupt/irq.rs @@ -11,6 +11,8 @@ use crate::scheme::debug::{debug_input, debug_notify}; use crate::scheme::serio::serio_input; use crate::{context, time}; +pub const PIT_RATE: u128 = 2_250_286; + //resets to 0 in context::switch() #[thread_local] pub static PIT_TICKS: AtomicUsize = AtomicUsize::new(0); @@ -137,13 +139,8 @@ unsafe fn ioapic_unmask(irq: usize) { interrupt_stack!(pit_stack, |_stack| { // Saves CPU time by not sending IRQ event irq_trigger(0); - const PIT_RATE: u64 = 2_250_286; - { - let mut offset = time::OFFSET.lock(); - let sum = offset.1 + PIT_RATE; - offset.1 = sum % 1_000_000_000; - offset.0 += sum / 1_000_000_000; + *time::OFFSET.lock() += PIT_RATE; } eoi(0); @@ -270,13 +267,8 @@ interrupt!(lapic_error, || { }); interrupt!(calib_pit, || { - const PIT_RATE: u64 = 2_250_286; - { - let mut offset = time::OFFSET.lock(); - let sum = offset.1 + PIT_RATE; - offset.1 = sum % 1_000_000_000; - offset.0 += sum / 1_000_000_000; + *time::OFFSET.lock() += PIT_RATE; } eoi(0); diff --git a/src/arch/x86_64/rmm.rs b/src/arch/x86_64/rmm.rs index 480f61f..830e9d3 100644 --- a/src/arch/x86_64/rmm.rs +++ b/src/arch/x86_64/rmm.rs @@ -413,6 +413,8 @@ pub unsafe fn init( log::warn!("{:X}:{:X} overlaps with acpi {:X}:{:X}", base, size, acpi_base, acpi_size); new_base = cmp::max(new_base, acpi_end); } + + // Ensure initfs areas are not used if base < initfs_end && base + size > initfs_base { log::warn!("{:X}:{:X} overlaps with initfs {:X}:{:X}", base, size, initfs_base, initfs_size); new_base = cmp::max(new_base, initfs_end); @@ -427,7 +429,7 @@ pub unsafe 
fn init( } if size == 0 { - // Area is zero sized + // Area is zero sized, skip continue; } diff --git a/src/arch/x86_64/stop.rs b/src/arch/x86_64/stop.rs index 0b12b8b..38aaef8 100644 --- a/src/arch/x86_64/stop.rs +++ b/src/arch/x86_64/stop.rs @@ -40,7 +40,7 @@ fn userspace_acpi_shutdown() { return; } log::info!("Waiting one second for ACPI driver to run the shutdown sequence."); - let (initial_s, initial_ns) = time::monotonic(); + let initial = time::monotonic(); // Since this driver is a userspace process, and we do not use any magic like directly // context switching, we have to wait for the userspace driver to complete, with a timeout. @@ -51,13 +51,9 @@ fn userspace_acpi_shutdown() { // event flag like EVENT_DIRECT, which has already been suggested for IRQs. // TODO: Waitpid with timeout? Because, what if the ACPI driver would crash? let _ = unsafe { context::switch() }; - let (current_s, current_ns) = time::monotonic(); - let diff_s = current_s - initial_s; - let diff_part_ns = current_ns - initial_ns; - let diff_ns = diff_s * 1_000_000_000 + diff_part_ns; - - if diff_ns > 1_000_000_000 { + let current = time::monotonic(); + if current - initial > time::NANOS_PER_SEC { log::info!("Timeout reached, thus falling back to other shutdown methods."); return; } diff --git a/src/context/context.rs b/src/context/context.rs index b526065..566448c 100644 --- a/src/context/context.rs +++ b/src/context/context.rs @@ -116,7 +116,7 @@ pub struct ContextSnapshot { pub status_reason: &'static str, pub running: bool, pub cpu_id: Option, - pub ticks: u64, + pub cpu_time: u128, pub syscall: Option<(usize, usize, usize, usize, usize, usize)>, // Clone fields //TODO: is there a faster way than allocation? 
@@ -160,7 +160,7 @@ impl ContextSnapshot { status_reason: context.status_reason, running: context.running, cpu_id: context.cpu_id, - ticks: context.ticks, + cpu_time: context.cpu_time, syscall: context.syscall, name, files, @@ -200,8 +200,10 @@ pub struct Context { pub running: bool, /// CPU ID, if locked pub cpu_id: Option, - /// Number of timer ticks executed - pub ticks: u64, + /// Time this context was switched to + pub switch_time: u128, + /// Amount of CPU time used + pub cpu_time: u128, /// Current system call pub syscall: Option<(usize, usize, usize, usize, usize, usize)>, /// Head buffer to use when system call buffers are not page aligned @@ -215,7 +217,7 @@ pub struct Context { /// Context should handle pending signals pub pending: VecDeque, /// Context should wake up at specified time - pub wake: Option<(u64, u64)>, + pub wake: Option, /// The architecture specific context pub arch: arch::Context, /// Kernel FX - used to store SIMD and FPU registers on context switch @@ -348,7 +350,8 @@ impl Context { status_reason: "", running: false, cpu_id: None, - ticks: 0, + switch_time: 0, + cpu_time: 0, syscall: None, syscall_head, syscall_tail, diff --git a/src/context/switch.rs b/src/context/switch.rs index 62e4f25..f4a3ee0 100644 --- a/src/context/switch.rs +++ b/src/context/switch.rs @@ -57,7 +57,7 @@ unsafe fn update(context: &mut Context, cpu_id: usize) { let wake = context.wake.expect("context::switch: wake not set"); let current = time::monotonic(); - if current.0 > wake.0 || (current.0 == wake.0 && current.1 >= wake.1) { + if current >= wake { context.wake = None; context.unblock(); } @@ -96,7 +96,7 @@ unsafe fn runnable(context: &Context, cpu_id: usize) -> bool { pub unsafe fn switch() -> bool { // TODO: Better memory orderings? 
//set PIT Interrupt counter to 0, giving each process same amount of PIT ticks - let ticks = PIT_TICKS.swap(0, Ordering::SeqCst); + let _ticks = PIT_TICKS.swap(0, Ordering::SeqCst); // Set the global lock to avoid the unsafe operations below from causing issues while arch::CONTEXT_SWITCH_LOCK.compare_exchange_weak(false, true, Ordering::SeqCst, Ordering::Relaxed).is_err() { @@ -104,6 +104,7 @@ pub unsafe fn switch() -> bool { } let cpu_id = crate::cpu_id(); + let switch_time = crate::time::monotonic(); let from_context_lock; let mut from_context_guard; @@ -116,7 +117,6 @@ pub unsafe fn switch() -> bool { .current() .expect("context::switch: not inside of context")); from_context_guard = from_context_lock.write(); - from_context_guard.ticks += ticks as u64 + 1; // Always round ticks up } for (pid, context_lock) in contexts.iter() { @@ -162,8 +162,14 @@ pub unsafe fn switch() -> bool { if let Some((to_context_lock, to_ptr)) = to_context_lock { let to_context: &mut Context = &mut *to_ptr; + // Set old context as not running and update CPU time from_context_guard.running = false; + from_context_guard.cpu_time += switch_time - from_context_guard.switch_time; + + // Set new context as running and set switch time to_context.running = true; + to_context.switch_time = switch_time; + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { if let Some(ref stack) = to_context.kstack { diff --git a/src/context/timeout.rs b/src/context/timeout.rs index 5e0fdb3..220811d 100644 --- a/src/context/timeout.rs +++ b/src/context/timeout.rs @@ -12,7 +12,7 @@ struct Timeout { pub scheme_id: SchemeId, pub event_id: usize, pub clock: usize, - pub time: (u64, u64), + pub time: u128, } type Registry = VecDeque; @@ -35,7 +35,7 @@ pub fn register(scheme_id: SchemeId, event_id: usize, clock: usize, time: TimeSp scheme_id, event_id, clock, - time: (time.tv_sec as u64, time.tv_nsec as u64) + time: (time.tv_sec as u128 * time::NANOS_PER_SEC) + (time.tv_nsec as u128) }); } @@ -50,11 +50,11 @@ 
pub fn trigger() { let trigger = match registry[i].clock { CLOCK_MONOTONIC => { let time = registry[i].time; - mono.0 > time.0 || (mono.0 == time.0 && mono.1 >= time.1) + mono >= time }, CLOCK_REALTIME => { let time = registry[i].time; - real.0 > time.0 || (real.0 == time.0 && real.1 >= time.1) + real >= time }, clock => { println!("timeout::trigger: unknown clock {}", clock); diff --git a/src/scheme/sys/context.rs b/src/scheme/sys/context.rs index 9555433..ec4fcfe 100644 --- a/src/scheme/sys/context.rs +++ b/src/scheme/sys/context.rs @@ -63,18 +63,13 @@ pub fn resource() -> Result> { format!("?") }; - let ticks = context.ticks; - let ticks_string = if ticks >= 1000 * 1000 * 1000 * 1000 { - format!("{} T", ticks / 1000 / 1000 / 1000 / 1000) - } else if ticks >= 1000 * 1000 * 1000 { - format!("{} G", ticks / 1000 / 1000 / 1000) - } else if ticks >= 1000 * 1000 { - format!("{} M", ticks / 1000 / 1000) - } else if ticks >= 1000 { - format!("{} K", ticks / 1000) - } else { - format!("{}", ticks) - }; + let cpu_time = context.cpu_time / crate::time::NANOS_PER_SEC; + let cpu_time_string = format!( + "{:02}:{:02}:{:02}", + cpu_time / 3600, + (cpu_time / 60) % 60, + cpu_time % 60 + ); let mut memory = context.kfx.len(); if let Some(ref kstack) = context.kstack { @@ -98,7 +93,7 @@ pub fn resource() -> Result> { format!("{} B", memory) }; - string.push_str(&format!("{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<8}{:<8}{}\n", + string.push_str(&format!("{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<9}{:<8}{}\n", context.id.into(), context.pgid.into(), context.ppid.into(), @@ -110,7 +105,7 @@ pub fn resource() -> Result> { context.ens.into(), stat_string, cpu_string, - ticks_string, + cpu_time_string, memory_string, *context.name.read())); } diff --git a/src/scheme/time.rs b/src/scheme/time.rs index a5f9286..c82da9e 100644 --- a/src/scheme/time.rs +++ b/src/scheme/time.rs @@ -58,8 +58,8 @@ impl Scheme for TimeScheme { CLOCK_MONOTONIC => 
time::monotonic(), _ => return Err(Error::new(EINVAL)) }; - time_buf[i].tv_sec = arch_time.0 as i64; - time_buf[i].tv_nsec = arch_time.1 as i32; + time_buf[i].tv_sec = (arch_time / time::NANOS_PER_SEC) as i64; + time_buf[i].tv_nsec = (arch_time % time::NANOS_PER_SEC) as i32; i += 1; } diff --git a/src/syscall/futex.rs b/src/syscall/futex.rs index 24464f3..763c170 100644 --- a/src/syscall/futex.rs +++ b/src/syscall/futex.rs @@ -107,8 +107,7 @@ pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> R if let Some(timeout) = timeout_opt { let start = time::monotonic(); - let sum = start.1 + timeout.tv_nsec as u64; - let end = (start.0 + timeout.tv_sec as u64 + sum / 1_000_000_000, sum % 1_000_000_000); + let end = start + (timeout.tv_sec as u128 * time::NANOS_PER_SEC) + (timeout.tv_nsec as u128); context.wake = Some(end); } diff --git a/src/syscall/time.rs b/src/syscall/time.rs index 5fc089e..24eed80 100644 --- a/src/syscall/time.rs +++ b/src/syscall/time.rs @@ -11,8 +11,8 @@ pub fn clock_gettime(clock: usize, time: &mut TimeSpec) -> Result { _ => return Err(Error::new(EINVAL)) }; - time.tv_sec = arch_time.0 as i64; - time.tv_nsec = arch_time.1 as i32; + time.tv_sec = (arch_time / time::NANOS_PER_SEC) as i64; + time.tv_nsec = (arch_time % time::NANOS_PER_SEC) as i32; Ok(0) } @@ -20,8 +20,7 @@ pub fn clock_gettime(clock: usize, time: &mut TimeSpec) -> Result { pub fn nanosleep(req: &TimeSpec, rem_opt: Option<&mut TimeSpec>) -> Result { //start is a tuple of (seconds, nanoseconds) let start = time::monotonic(); - let sum = start.1 + req.tv_nsec as u64; - let mut end = (start.0 + req.tv_sec as u64 + sum / 1_000_000_000, sum % 1_000_000_000); + let end = start + (req.tv_sec as u128 * time::NANOS_PER_SEC) + (req.tv_nsec as u128); { let contexts = context::contexts(); @@ -49,14 +48,10 @@ pub fn nanosleep(req: &TimeSpec, rem_opt: Option<&mut TimeSpec>) -> Result = Mutex::new((0, 0)); -/// Kernel up time, measured in (seconds, nanoseconds) since 
`START_TIME` -pub static OFFSET: Mutex<(u64, u64)> = Mutex::new((0, 0)); +pub const NANOS_PER_SEC: u128 = 1_000_000_000; -pub fn monotonic() -> (u64, u64) { +/// Kernel start time, measured in nanoseconds since the Unix epoch +pub static START: Mutex<u128> = Mutex::new(0); +/// Kernel up time, measured in nanoseconds since `START` +pub static OFFSET: Mutex<u128> = Mutex::new(0); + +pub fn monotonic() -> u128 { *OFFSET.lock() } -pub fn realtime() -> (u64, u64) { - let offset = monotonic(); - let start = *START.lock(); - let sum = start.1 + offset.1; - (start.0 + offset.0 + sum / 1_000_000_000, sum % 1_000_000_000) +pub fn realtime() -> u128 { + *START.lock() + monotonic() }