
[refactor] add set_cpumask APIs, separate CurrentRunQueueRef, add put_task_with_state #183

Merged on Oct 17, 2024 (18 commits)

Changes from 2 commits
16 changes: 16 additions & 0 deletions modules/axtask/src/api.rs
@@ -142,6 +142,22 @@ pub fn set_priority(prio: isize) -> bool {
current_run_queue::<NoPreemptIrqSave>().set_current_priority(prio)
}

/// Set the CPU affinity of the current task.
/// [`cpumask::CpuMask`] is used to specify the CPU affinity.
/// Returns `true` if the affinity is set successfully.
///
/// TODO: support setting the affinity of other tasks.
pub fn set_current_affinity(cpumask: CpuMask) -> bool {
if cpumask.is_empty() {
false
} else {
current().set_cpumask(cpumask);
// After setting the affinity, we need to migrate the task to make it take effect.
current_run_queue::<NoPreemptIrqSave>().migrate_current();
true
}
}

/// Current task gives up the CPU time voluntarily, and switches to another
/// ready task.
pub fn yield_now() {
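
For context, a minimal caller-side sketch of the new API (not part of this diff). It assumes `axtask` re-exports `CpuMask` and that `CpuMask::one_shot(i)` constructs a mask with only bit `i` set; substitute whatever constructor the cpumask crate actually provides.

```rust
// Hypothetical usage sketch: pin the current task to CPU 0.
// `CpuMask::one_shot(0)` is an assumed single-bit constructor.
fn pin_to_cpu0() -> bool {
    let mask = CpuMask::one_shot(0);
    // Only an empty mask is rejected; on success the current task is
    // migrated (if necessary) and from then on runs only on CPU 0.
    axtask::set_current_affinity(mask)
}
```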
89 changes: 53 additions & 36 deletions modules/axtask/src/run_queue.rs
Expand Up @@ -2,7 +2,7 @@ use alloc::collections::VecDeque;
use alloc::sync::Arc;
use core::mem::MaybeUninit;

use kernel_guard::BaseGuard;
use kernel_guard::{BaseGuard, NoOp};
use kspin::SpinRaw;
use lazyinit::LazyInit;
use scheduler::BaseScheduler;
@@ -209,7 +209,24 @@ impl<'a, G: BaseGuard> AxRunQueueRef<'a, G> {
let curr = crate::current();
trace!("task yield: {}", curr.id_name());
assert!(curr.is_running());
self.inner.resched(false);
self.inner.put_prev(false);
self.inner.resched();
}

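/// Migrates the current task to a run queue matching its CPU affinity
/// mask and triggers a reschedule.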
pub fn migrate_current(&mut self) {
let curr = crate::current();
trace!("task migrate: {}", curr.id_name());

assert!(curr.is_running());
curr.set_state(TaskState::Ready);

select_run_queue::<NoOp>(curr.as_task_ref())
.inner
.scheduler
.lock()
.put_prev_task(curr.clone(), false);

self.inner.resched();
}
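
The destination queue above comes from `select_run_queue`, whose selection policy is not shown in this hunk. A plausible sketch of a mask-respecting policy, assuming `CpuMask` exposes a bitmaps-style `get` and that `axconfig::SMP` gives the CPU count:

```rust
// Sketch only, not this PR's actual implementation: pick the first CPU
// that the task's affinity mask allows.
fn select_cpu_for(task: &AxTaskRef) -> usize {
    let mask = task.cpumask();
    (0..axconfig::SMP)
        .find(|&cpu| mask.get(cpu)) // first CPU permitted by the mask
        .expect("cpumask is checked to be non-empty in set_current_affinity")
}
```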

pub fn set_current_priority(&mut self, prio: isize) -> bool {
@@ -238,7 +255,8 @@ impl<'a, G: BaseGuard> AxRunQueueRef<'a, G> {
can_preempt
);
if can_preempt {
self.inner.resched(true);
self.inner.put_prev(true);
self.inner.resched();
} else {
curr.set_preempt_pending(true);
}
@@ -274,7 +292,7 @@ impl<'a, G: BaseGuard> AxRunQueueRef<'a, G> {
}

// Schedule to next task.
self.inner.resched(false);
self.inner.resched();
}
unreachable!("task exited!");
}
@@ -310,7 +328,7 @@ impl<'a, G: BaseGuard> AxRunQueueRef<'a, G> {
// see `unblock_task()` for details.

debug!("task block: {}", curr.id_name());
self.inner.resched(false);
self.inner.resched();
}

/// Unblock one task by inserting it into the run queue.
@@ -322,27 +340,6 @@ impl<'a, G: BaseGuard> AxRunQueueRef<'a, G> {
// otherwise, the task is already unblocked by other cores.
if task.transition_state(TaskState::Blocked, TaskState::Ready) {
// Since now, the task to be unblocked is in the `Ready` state.

// If the owning (remote) CPU is still in the middle of schedule() with
// this task as prev, wait until it's done referencing the task.
//
// Pairs with the `clear_prev_task_on_cpu()`.
//
// This ensures that tasks getting woken will be fully ordered against
// their previous state and preserve Program Order.
//
// Note:
// 1. This should be placed after the judgement of `TaskState::Blocked,`,
// because the task may have been woken up by other cores.
// 2. This can be placed in the front of `switch_to()`
#[cfg(feature = "smp")]
while task.on_cpu() {
// Wait for the task to finish its scheduling process.
core::hint::spin_loop();
}
#[cfg(feature = "smp")]
assert!(!task.on_cpu());

let cpu_id = self.inner.cpu_id;
debug!("task unblock: {} on run_queue {}", task.id_name(), cpu_id);
self.inner
@@ -370,7 +367,7 @@ impl<'a, G: BaseGuard> AxRunQueueRef<'a, G> {
if now < deadline {
crate::timers::set_alarm_wakeup(deadline, curr.clone());
curr.set_state(TaskState::Blocked);
self.inner.resched(false);
self.inner.resched();
}
}
}
@@ -391,17 +388,21 @@ impl AxRunQueue {
}
}

/// Common reschedule subroutine. If `preempt`, keep current task's time
/// slice, otherwise reset it.
fn resched(&mut self, preempt: bool) {
/// Common reschedule subroutine.
/// Puts the current task back into the run queue.
/// If `preempt` is true, the current task's time slice is kept; otherwise it is reset.
fn put_prev(&mut self, preempt: bool) {
let prev = crate::current();
if prev.is_running() {
prev.set_state(TaskState::Ready);
if !prev.is_idle() {
self.scheduler.lock().put_prev_task(prev.clone(), preempt);
}
// If the current task is `Running`, set its state to `Ready` and
// put it back into the run queue (except for the idle task).
if prev.transition_state(TaskState::Running, TaskState::Ready) && !prev.is_idle() {
self.scheduler.lock().put_prev_task(prev.clone(), preempt);
}
}

/// Core reschedule subroutine.
/// Pick the next task to run and switch to it.
fn resched(&mut self) {
let next = self
.scheduler
.lock()
@@ -416,7 +417,7 @@ impl AxRunQueue {
next.id_name(),
next.state()
);
self.switch_to(prev, next);
self.switch_to(crate::current(), next);
}
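
With the old `resched(preempt: bool)` split into `put_prev` and `resched`, callers now compose the two pieces explicitly. A simplified sketch of the call patterns (these are private methods, so this is illustration only; `rq` stands in for `self.inner`):

```rust
// Simplified sketch, not verbatim code from this diff.
fn yield_path(rq: &mut AxRunQueue) {
    rq.put_prev(false); // re-queue the current task here, resetting its time slice
    rq.resched();       // pick the next task and switch to it
}

fn preempt_path(rq: &mut AxRunQueue) {
    rq.put_prev(true); // re-queue, but keep the remaining time slice
    rq.resched();
}

// `migrate_current` skips the local `put_prev` entirely: it enqueues the
// current task on the destination CPU's scheduler first, then only calls
// `resched()` on this queue.
```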

fn switch_to(&mut self, prev_task: CurrentTask, next_task: AxTaskRef) {
@@ -438,6 +439,22 @@ impl AxRunQueue {
return;
}

// The task must be switched to atomically: if the owning (remote) CPU is
// still in the middle of `schedule()` with this task (the next task) as
// `prev`, wait until it has finished referencing the task.
//
// Pairs with `clear_prev_task_on_cpu()`.
//
// Note:
// 1. This wait must come after the `TaskState::Blocked` transition check
// in `unblock_task()`, because the task may already have been woken up by
// another core.
// 2. This wait can also be placed at the front of `switch_to()`.
#[cfg(feature = "smp")]
while next_task.on_cpu() {
// Wait for the task to finish its scheduling process.
core::hint::spin_loop();
}

// Claim the task as running; we do this before switching to it
// so that any running task has this flag set.
#[cfg(feature = "smp")]
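
The spin-wait above moved here from `unblock_task`; its release side is `clear_prev_task_on_cpu` in task.rs below, which ends by calling `set_on_cpu(false)`. A minimal self-contained model of the handshake (illustration only, not the PR's code):

```rust
use core::sync::atomic::{AtomicBool, Ordering};

struct Task {
    on_cpu: AtomicBool,
}

// Runs on the CPU that is switching *away* from `prev`, after the
// context switch has completed on the new stack.
fn finish_switch(prev: &Task) {
    // Release: all writes made while `prev` was running become visible
    // to the CPU spinning in `wait_until_off_cpu`.
    prev.on_cpu.store(false, Ordering::Release);
}

// Runs on the CPU that wants to switch *to* `next`.
fn wait_until_off_cpu(next: &Task) {
    // Acquire: pairs with the release store in `finish_switch`.
    while next.on_cpu.load(Ordering::Acquire) {
        core::hint::spin_loop();
    }
}
```
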
58 changes: 32 additions & 26 deletions modules/axtask/src/task.rs
@@ -182,6 +182,38 @@ impl TaskInner {
None
}
}

/// Returns a mutable reference to the task context.
#[inline]
pub const fn ctx_mut(&mut self) -> &mut TaskContext {
self.ctx.get_mut()
}

/// Returns the top address of the kernel stack.
#[inline]
pub const fn kernel_stack_top(&self) -> Option<VirtAddr> {
match &self.kstack {
Some(s) => Some(s.top()),
None => None,
}
}

/// Gets the CPU affinity mask of the task.
///
/// Returns the task's CPU affinity mask as a [`cpumask::CpuMask`].
#[inline]
pub fn cpumask(&self) -> CpuMask {
*self.cpumask.lock()
}

/// Sets the CPU affinity mask of the task.
///
/// # Arguments
/// `cpumask` - the CPU affinity mask to set, as a [`cpumask::CpuMask`].
#[inline]
pub fn set_cpumask(&self, cpumask: CpuMask) {
*self.cpumask.lock() = cpumask;
}
}
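
A caller-side sketch of the now-public accessors (hypothetical, not from this PR; `CpuMask::set` follows the bitmaps-style API the cpumask crate is assumed to expose):

```rust
// Hypothetical sketch: widen a task's affinity to allow one more CPU.
fn allow_cpu(task: &TaskInner, cpu: usize) {
    let mut mask = task.cpumask(); // copy out the current mask
    mask.set(cpu, true);           // additionally permit `cpu`
    task.set_cpumask(mask);        // store the widened mask back
}
```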

// private methods
@@ -285,17 +317,6 @@ impl TaskInner {
self.is_idle
}

#[allow(unused)]
#[inline]
pub(crate) fn cpumask(&self) -> CpuMask {
*self.cpumask.lock()
}

#[inline]
pub(crate) fn set_cpumask(&self, cpumask: CpuMask) {
*self.cpumask.lock() = cpumask
}

#[inline]
pub(crate) fn in_wait_queue(&self) -> bool {
self.in_wait_queue.load(Ordering::Acquire)
@@ -381,12 +402,6 @@ impl TaskInner {
self.ctx.get()
}

/// Returns a mutable reference to the task context.
#[inline]
pub const fn ctx_mut(&mut self) -> &mut TaskContext {
self.ctx.get_mut()
}

/// Returns whether the task is running on a CPU.
///
/// It is used to protect the task from being moved to a different run queue
Expand Down Expand Up @@ -436,15 +451,6 @@ impl TaskInner {
.expect("Invalid prev_task pointer or prev_task has been dropped")
.set_on_cpu(false);
}

/// Returns the top address of the kernel stack.
#[inline]
pub const fn kernel_stack_top(&self) -> Option<VirtAddr> {
match &self.kstack {
Some(s) => Some(s.top()),
None => None,
}
}
}

impl fmt::Debug for TaskInner {