kernel/scheduler/
mlfq.rs

1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! Multilevel feedback queue scheduler for Tock
6//!
7//! Based on the MLFQ rules described in "Operating Systems: Three Easy Pieces"
8//! by Remzi H. Arpaci-Dusseau and Andrea C. Arpaci-Dusseau.
9//!
10//! This scheduler can be summarized by the following rules:
11//!
12//! - Rule 1: If Priority(A) > Priority(B), and both are ready, A runs (B
13//!   doesn't).
14//! - Rule 2: If Priority(A) = Priority(B), A & B run in round-robin fashion
15//!   using the time slice (quantum length) of the given queue.
16//! - Rule 3: When a job enters the system, it is placed at the highest priority
17//!   (the topmost queue).
18//! - Rule 4: Once a job uses up its time allotment at a given level (regardless
19//!   of how many times it has given up the CPU), its priority is reduced (i.e.,
20//!   it moves down one queue).
21//! - Rule 5: After some time period S, move all the jobs in the system to the
22//!   topmost queue.
23
24use core::cell::Cell;
25use core::num::NonZeroU32;
26
27use crate::collections::list::{List, ListLink, ListNode};
28use crate::hil::time::{self, ConvertTicks, Ticks};
29use crate::platform::chip::Chip;
30use crate::process::Process;
31use crate::process::StoppedExecutingReason;
32use crate::scheduler::{Scheduler, SchedulingDecision};
33
/// Per-process accounting state for the MLFQ scheduler.
///
/// `Default` yields a zeroed counter, which is the correct initial state for
/// a process entering the system (Rule 3 places it in the topmost queue with
/// no time consumed).
#[derive(Default)]
struct MfProcState {
    /// Total CPU time used by this process while in current queue,
    /// in microseconds. Cleared (set to 0) when the process is demoted
    /// after exhausting its timeslice.
    us_used_this_queue: Cell<u32>,
}
39
/// Nodes store per-process state
pub struct MLFQProcessNode<'a> {
    // Reference to a (possibly empty) process slot; `None` means no process
    // occupies the slot this node tracks.
    proc: &'static Option<&'static dyn Process>,
    // Scheduler bookkeeping: CPU time consumed at the current priority level.
    state: MfProcState,
    // Intrusive link to the next node of whichever priority queue currently
    // holds this node.
    next: ListLink<'a, MLFQProcessNode<'a>>,
}
46
47impl<'a> MLFQProcessNode<'a> {
48    pub fn new(proc: &'static Option<&'static dyn Process>) -> MLFQProcessNode<'a> {
49        MLFQProcessNode {
50            proc,
51            state: MfProcState::default(),
52            next: ListLink::empty(),
53        }
54    }
55}
56
impl<'a> ListNode<'a, MLFQProcessNode<'a>> for MLFQProcessNode<'a> {
    /// Expose this node's intrusive link so `List` can traverse the queue
    /// it is stored in.
    fn next(&'a self) -> &'a ListLink<'a, MLFQProcessNode<'a>> {
        &self.next
    }
}
62
/// Multilevel feedback queue scheduler state.
pub struct MLFQSched<'a, A: 'static + time::Alarm<'static>> {
    // Time source used to decide when to perform the periodic priority
    // refresh (Rule 5).
    alarm: &'static A,
    // One ready queue per priority level; index 0 is the highest priority.
    pub processes: [List<'a, MLFQProcessNode<'a>>; 3], // Using Self::NUM_QUEUES causes rustc to crash..
    // Alarm time at which all processes should next be promoted to queue 0.
    next_reset: Cell<A::Ticks>,
    // Alarm time observed on the previous `next()` call; kept so a reset is
    // not missed when the underlying alarm wraps around.
    last_reset_check: Cell<A::Ticks>,
    // Timeslice (microseconds) handed out by the most recent `next()`.
    last_timeslice: Cell<u32>,
    // Queue index of the most recently scheduled process; `result()` uses it
    // to find that process at the head of its queue.
    last_queue_idx: Cell<usize>,
}
71
72impl<'a, A: 'static + time::Alarm<'static>> MLFQSched<'a, A> {
73    /// How often to restore all processes to max priority
74    pub const PRIORITY_REFRESH_PERIOD_MS: u32 = 5000;
75    pub const NUM_QUEUES: usize = 3;
76
77    pub fn new(alarm: &'static A) -> Self {
78        Self {
79            alarm,
80            processes: [List::new(), List::new(), List::new()],
81            next_reset: Cell::new(A::Ticks::from(0)),
82            last_reset_check: Cell::new(A::Ticks::from(0)),
83            last_timeslice: Cell::new(0),
84            last_queue_idx: Cell::new(0),
85        }
86    }
87
88    fn get_timeslice_us(&self, queue_idx: usize) -> u32 {
89        match queue_idx {
90            0 => 10000,
91            1 => 20000,
92            2 => 50000,
93            _ => panic!("invalid queue idx"),
94        }
95    }
96
97    fn redeem_all_procs(&self) {
98        for queue in self.processes.iter().skip(1) {
99            if let Some(proc) = queue.pop_head() {
100                self.processes[0].push_tail(proc)
101            }
102        }
103    }
104
105    /// Returns the process at the head of the highest priority queue containing a process
106    /// that is ready to execute (as determined by `has_tasks()`)
107    /// This method moves that node to the head of its queue.
108    fn get_next_ready_process_node(&self) -> (Option<&MLFQProcessNode<'a>>, usize) {
109        for (idx, queue) in self.processes.iter().enumerate() {
110            let next = queue
111                .iter()
112                .find(|node_ref| node_ref.proc.is_some_and(|proc| proc.ready()));
113            if next.is_some() {
114                // pop procs to back until we get to match
115                loop {
116                    let cur = queue.pop_head();
117                    if let Some(node) = cur {
118                        if core::ptr::eq(node, next.unwrap()) {
119                            queue.push_head(node);
120                            // match! Put back on front
121                            return (next, idx);
122                        } else {
123                            queue.push_tail(node);
124                        }
125                    }
126                }
127            }
128        }
129        (None, 0)
130    }
131}
132
133impl<A: 'static + time::Alarm<'static>, C: Chip> Scheduler<C> for MLFQSched<'_, A> {
134    fn next(&self) -> SchedulingDecision {
135        let now = self.alarm.now();
136        let next_reset = self.next_reset.get();
137        let last_reset_check = self.last_reset_check.get();
138
139        // storing last reset check is necessary to avoid missing a reset when the underlying
140        // alarm wraps around
141        if !now.within_range(last_reset_check, next_reset) {
142            // Promote all processes to highest priority queue
143            self.next_reset
144                .set(now.wrapping_add(self.alarm.ticks_from_ms(Self::PRIORITY_REFRESH_PERIOD_MS)));
145            self.redeem_all_procs();
146        }
147        self.last_reset_check.set(now);
148        let (node_ref_opt, queue_idx) = self.get_next_ready_process_node();
149        if node_ref_opt.is_none() {
150            return SchedulingDecision::TrySleep;
151        }
152        let node_ref = node_ref_opt.unwrap();
153        let timeslice = self.get_timeslice_us(queue_idx) - node_ref.state.us_used_this_queue.get();
154        let next = node_ref.proc.unwrap().processid();
155        self.last_queue_idx.set(queue_idx);
156        self.last_timeslice.set(timeslice);
157
158        SchedulingDecision::RunProcess((next, NonZeroU32::new(timeslice)))
159    }
160
161    fn result(&self, result: StoppedExecutingReason, execution_time_us: Option<u32>) {
162        let execution_time_us = execution_time_us.unwrap(); // should never fail as we never run cooperatively
163        let queue_idx = self.last_queue_idx.get();
164        // Last executed node will always be at head of its queue
165        let node_ref = self.processes[queue_idx].head().unwrap();
166        node_ref
167            .state
168            .us_used_this_queue
169            .set(self.last_timeslice.get() - execution_time_us);
170
171        let punish = result == StoppedExecutingReason::TimesliceExpired;
172        if punish {
173            node_ref.state.us_used_this_queue.set(0);
174            let next_queue = if queue_idx == Self::NUM_QUEUES - 1 {
175                queue_idx
176            } else {
177                queue_idx + 1
178            };
179            self.processes[next_queue].push_tail(self.processes[queue_idx].pop_head().unwrap());
180        } else {
181            self.processes[queue_idx].push_tail(self.processes[queue_idx].pop_head().unwrap());
182        }
183    }
184}