tokio/runtime/scheduler/multi_thread/worker.rs

//! A scheduler is initialized with a fixed number of workers. Each worker is
//! driven by a thread. Each worker has a "core" which contains data such as the
//! run queue and other state. When `block_in_place` is called, the worker's
//! "core" is handed off to a new thread allowing the scheduler to continue to
//! make progress while the originating thread blocks.
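//!
//! For example, from the caller's point of view (a usage sketch, not part of
//! this module's internal API):
//!
//! ```ignore
//! // Inside a task running on the multi-threaded runtime:
//! let contents = tokio::task::block_in_place(|| {
//!     // Blocking here is fine: this worker's core is handed off to another
//!     // thread, so the remaining workers keep making progress.
//!     std::fs::read_to_string("some-file.txt")
//! });
//! ```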
//!
//! # Shutdown
//!
//! Shutting down the runtime involves the following steps:
//!
//!  1. The `Shared::close` method is called. This closes the inject queue and
//!     `OwnedTasks` instance and wakes up all worker threads.
//!
//!  2. Each worker thread observes the close signal next time it runs
//!     `Core::maintenance` by checking whether the inject queue is closed.
//!     The `Core::is_shutdown` flag is set to true.
//!
//!  3. The worker threads call `pre_shutdown` in parallel. Here, each worker
//!     will keep removing tasks from `OwnedTasks` until it is empty. No new
//!     tasks can be pushed to the `OwnedTasks` during or after this step as it
//!     was closed in step 1.
//!
//!  4. The workers call `Shared::shutdown` to enter the single-threaded phase of
//!     shutdown. These calls will push their core to `Shared::shutdown_cores`,
//!     and the last thread to push its core will finish the shutdown procedure.
//!
//!  5. The local run queue of each core is emptied, then the inject queue is
//!     emptied.
//!
//! At this point, shutdown has completed. It is not possible for any of the
//! collections to still contain tasks, as each collection was closed first and
//! emptied afterwards.
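//!
//! From the user's point of view, this whole sequence is triggered by dropping
//! the runtime or by calling one of its explicit shutdown methods. For example
//! (a usage sketch, not part of this module's internal API):
//!
//! ```ignore
//! use std::time::Duration;
//!
//! let rt = tokio::runtime::Builder::new_multi_thread()
//!     .worker_threads(2)
//!     .enable_all()
//!     .build()
//!     .unwrap();
//!
//! rt.spawn(async { /* ... */ });
//!
//! // Closes the inject queue and `OwnedTasks` (step 1 above), then waits up
//! // to the timeout for the worker threads to run the remaining steps.
//! rt.shutdown_timeout(Duration::from_secs(1));
//! ```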
//!
//! ## Spawns during shutdown
//!
//! When spawning tasks during shutdown, there are two cases:
//!
//!  * The spawner observes the `OwnedTasks` being open, and the inject queue is
//!    closed.
//!  * The spawner observes the `OwnedTasks` being closed and doesn't check the
//!    inject queue.
//!
//! The first case can only happen if the `OwnedTasks::bind` call happens before
//! or during step 1 of shutdown. In this case, the runtime will clean up the
//! task in step 3 of shutdown.
//!
//! In the second case, the task was not spawned and is immediately cancelled by
//! the spawner.
//!
//! The correctness of shutdown requires both the inject queue and `OwnedTasks`
//! collection to have a closed bit. With a close bit on only the inject queue,
//! spawning could run into a situation where a task is successfully bound long
//! after the runtime has shut down. With a close bit on only the `OwnedTasks`,
//! the first spawning situation could result in the notification being pushed
//! to the inject queue after step 5 of shutdown, which would leave a task in
//! the inject queue indefinitely. This would be a ref-count cycle and a memory
//! leak.
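//!
//! A simplified sketch of the spawn-side checks described above (illustrative
//! only; the real types and signatures in this module differ):
//!
//! ```ignore
//! fn spawn(task: Task, owned: &OwnedTasks, inject: &Inject) {
//!     match owned.bind(task) {
//!         // Second case: `OwnedTasks` is closed. The task was never spawned
//!         // and the spawner cancels it immediately.
//!         Err(task) => cancel(task),
//!         // First case: the task was bound before or during step 1. Even if
//!         // the inject queue rejects the notification because it is closed,
//!         // the task is in `OwnedTasks`, so step 3 will clean it up.
//!         Ok(notified) => {
//!             let _ = inject.push(notified);
//!         }
//!     }
//! }
//! ```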
58
59use crate::loom::sync::{Arc, Mutex};
60use crate::runtime;
61use crate::runtime::scheduler::multi_thread::{
62    idle, queue, Counters, Handle, Idle, Overflow, Parker, Stats, TraceStatus, Unparker,
63};
64use crate::runtime::scheduler::{inject, Defer, Lock};
65use crate::runtime::task::OwnedTasks;
66use crate::runtime::{
67    blocking, driver, scheduler, task, Config, SchedulerMetrics, TimerFlavor, WorkerMetrics,
68};
69use crate::runtime::{context, TaskHooks};
70use crate::task::coop;
71use crate::util::atomic_cell::AtomicCell;
72use crate::util::rand::{FastRand, RngSeedGenerator};
73
74use std::cell::RefCell;
75use std::task::Waker;
76use std::thread;
77use std::time::Duration;
78
79mod metrics;
80
81cfg_taskdump! {
82    mod taskdump;
83}
84
85cfg_not_taskdump! {
86    mod taskdump_mock;
87}
88
89#[cfg(all(tokio_unstable, feature = "time"))]
90use crate::loom::sync::atomic::AtomicBool;
91
92#[cfg(all(tokio_unstable, feature = "time"))]
93use crate::runtime::time_alt;
94
95#[cfg(all(tokio_unstable, feature = "time"))]
96use crate::runtime::scheduler::util;
97
98/// A scheduler worker
99pub(super) struct Worker {
100    /// Reference to scheduler's handle
101    handle: Arc<Handle>,
102
103    /// Index holding this worker's remote state
104    index: usize,
105
106    /// Used to hand-off a worker's core to another thread.
107    core: AtomicCell<Core>,
108}
109
110/// Core data
111struct Core {
112    /// Used to schedule bookkeeping tasks every so often.
113    tick: u32,
114
    /// When a task is scheduled from a worker, it is stored in this slot. The
    /// worker will check this slot for a task **before** checking the run
    /// queue. This effectively results in the **last** scheduled task being run
    /// next (LIFO). This is an optimization that improves locality, which
    /// benefits message passing patterns and helps to reduce latency.
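    ///
    /// For example (an illustrative sketch using the public API, not this
    /// module's internals):
    ///
    /// ```ignore
    /// // When the send wakes the receiver task from a worker thread, the
    /// // receiver is typically placed in that worker's LIFO slot and polled
    /// // next, while the message is still hot in cache.
    /// let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
    /// tokio::spawn(async move { while rx.recv().await.is_some() {} });
    /// tx.send(1u32).unwrap();
    /// ```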
120    lifo_slot: Option<Notified>,
121
122    /// When `true`, locally scheduled tasks go to the LIFO slot. When `false`,
123    /// they go to the back of the `run_queue`.
124    lifo_enabled: bool,
125
126    /// The worker-local run queue.
127    run_queue: queue::Local<Arc<Handle>>,
128
129    #[cfg(all(tokio_unstable, feature = "time"))]
130    time_context: time_alt::LocalContext,
131
132    /// True if the worker is currently searching for more work. Searching
133    /// involves attempting to steal from other workers.
134    is_searching: bool,
135
136    /// True if the scheduler is being shutdown
137    is_shutdown: bool,
138
139    /// True if the scheduler is being traced
140    is_traced: bool,
141
142    /// Parker
143    ///
144    /// Stored in an `Option` as the parker is added / removed to make the
145    /// borrow checker happy.
146    park: Option<Parker>,
147
148    /// Per-worker runtime stats
149    stats: Stats,
150
151    /// How often to check the global queue
152    global_queue_interval: u32,
153
154    /// Fast random number generator.
155    rand: FastRand,
156}
157
158/// State shared across all workers
159pub(crate) struct Shared {
    /// Per-worker remote state. All other workers have access to this, which is
    /// how they communicate with each other.
162    remotes: Box<[Remote]>,
163
    /// Global task queue used for:
    ///  1. Submitting work to the scheduler while **not** currently on a worker thread.
    ///  2. Submitting work to the scheduler when a worker's run queue is saturated.
167    pub(super) inject: inject::Shared<Arc<Handle>>,
168
169    /// Coordinates idle workers
170    idle: Idle,
171
172    /// Collection of all active tasks spawned onto this executor.
173    pub(crate) owned: OwnedTasks<Arc<Handle>>,
174
175    /// Data synchronized by the scheduler mutex
176    pub(super) synced: Mutex<Synced>,
177
178    /// Cores that have observed the shutdown signal
179    ///
    /// The core is **not** placed back in the worker to prevent it from being
181    /// stolen by a thread that was spawned as part of `block_in_place`.
182    #[allow(clippy::vec_box)] // we're moving an already-boxed value
183    shutdown_cores: Mutex<Vec<Box<Core>>>,
184
185    /// The number of cores that have observed the trace signal.
186    pub(super) trace_status: TraceStatus,
187
188    /// Scheduler configuration options
189    config: Config,
190
191    /// Collects metrics from the runtime.
192    pub(super) scheduler_metrics: SchedulerMetrics,
193
194    pub(super) worker_metrics: Box<[WorkerMetrics]>,
195
196    /// Only held to trigger some code on drop. This is used to get internal
197    /// runtime metrics that can be useful when doing performance
198    /// investigations. This does nothing (empty struct, no drop impl) unless
199    /// the `tokio_internal_mt_counters` `cfg` flag is set.
200    _counters: Counters,
201}
202
203/// Data synchronized by the scheduler mutex
204pub(crate) struct Synced {
205    /// Synchronized state for `Idle`.
206    pub(super) idle: idle::Synced,
207
208    /// Synchronized state for `Inject`.
209    pub(crate) inject: inject::Synced,
210
211    #[cfg(all(tokio_unstable, feature = "time"))]
    /// Timers pending registration.
    /// This is used to register a timer when the [`Core`]
    /// is not available on the current thread.
215    inject_timers: Vec<time_alt::EntryHandle>,
216}
217
218/// Used to communicate with a worker from other threads.
219struct Remote {
220    /// Steals tasks from this worker.
221    pub(super) steal: queue::Steal<Arc<Handle>>,
222
223    /// Unparks the associated worker thread
224    unpark: Unparker,
225}
226
227/// Thread-local context
228pub(crate) struct Context {
229    /// Worker
230    worker: Arc<Worker>,
231
232    /// Core data
233    core: RefCell<Option<Box<Core>>>,
234
235    /// Tasks to wake after resource drivers are polled. This is mostly to
236    /// handle yielded tasks.
237    pub(crate) defer: Defer,
238}
239
240/// Starts the workers
241pub(crate) struct Launch(Vec<Arc<Worker>>);
242
243/// Running a task may consume the core. If the core is still available when
244/// running the task completes, it is returned. Otherwise, the worker will need
245/// to stop processing.
246type RunResult = Result<Box<Core>, ()>;
247
248/// A notified task handle
249type Notified = task::Notified<Arc<Handle>>;
250
/// Value picked out of thin air. Running the LIFO slot a handful of times
/// seems sufficient to benefit from locality. More than 3 times is probably
253/// over-weighting. The value can be tuned in the future with data that shows
254/// improvements.
255const MAX_LIFO_POLLS_PER_TICK: usize = 3;
256
257pub(super) fn create(
258    size: usize,
259    park: Parker,
260    driver_handle: driver::Handle,
261    blocking_spawner: blocking::Spawner,
262    seed_generator: RngSeedGenerator,
263    config: Config,
264    timer_flavor: TimerFlavor,
265) -> (Arc<Handle>, Launch) {
266    let mut cores = Vec::with_capacity(size);
267    let mut remotes = Vec::with_capacity(size);
268    let mut worker_metrics = Vec::with_capacity(size);
269
270    // Create the local queues
271    for _ in 0..size {
272        let (steal, run_queue) = queue::local();
273
274        let park = park.clone();
275        let unpark = park.unpark();
276        let metrics = WorkerMetrics::from_config(&config);
277        let stats = Stats::new(&metrics);
278
279        cores.push(Box::new(Core {
280            tick: 0,
281            lifo_slot: None,
282            lifo_enabled: !config.disable_lifo_slot,
283            run_queue,
284            #[cfg(all(tokio_unstable, feature = "time"))]
285            time_context: time_alt::LocalContext::new(),
286            is_searching: false,
287            is_shutdown: false,
288            is_traced: false,
289            park: Some(park),
290            global_queue_interval: stats.tuned_global_queue_interval(&config),
291            stats,
292            rand: FastRand::from_seed(config.seed_generator.next_seed()),
293        }));
294
295        remotes.push(Remote { steal, unpark });
296        worker_metrics.push(metrics);
297    }
298
299    let (idle, idle_synced) = Idle::new(size);
300    let (inject, inject_synced) = inject::Shared::new();
301
302    let remotes_len = remotes.len();
303    let handle = Arc::new(Handle {
304        task_hooks: TaskHooks::from_config(&config),
305        shared: Shared {
306            remotes: remotes.into_boxed_slice(),
307            inject,
308            idle,
309            owned: OwnedTasks::new(size),
310            synced: Mutex::new(Synced {
311                idle: idle_synced,
312                inject: inject_synced,
313                #[cfg(all(tokio_unstable, feature = "time"))]
314                inject_timers: Vec::new(),
315            }),
316            shutdown_cores: Mutex::new(vec![]),
317            trace_status: TraceStatus::new(remotes_len),
318            config,
319            scheduler_metrics: SchedulerMetrics::new(),
320            worker_metrics: worker_metrics.into_boxed_slice(),
321            _counters: Counters,
322        },
323        driver: driver_handle,
324        blocking_spawner,
325        seed_generator,
326        timer_flavor,
327        #[cfg(all(tokio_unstable, feature = "time"))]
328        is_shutdown: AtomicBool::new(false),
329    });
330
331    let mut launch = Launch(vec![]);
332
333    for (index, core) in cores.drain(..).enumerate() {
334        launch.0.push(Arc::new(Worker {
335            handle: handle.clone(),
336            index,
337            core: AtomicCell::new(Some(core)),
338        }));
339    }
340
341    (handle, launch)
342}
343
344#[track_caller]
345pub(crate) fn block_in_place<F, R>(f: F) -> R
346where
347    F: FnOnce() -> R,
348{
349    // Try to steal the worker core back
350    struct Reset {
351        take_core: bool,
352        budget: coop::Budget,
353    }
354
355    impl Drop for Reset {
356        fn drop(&mut self) {
357            with_current(|maybe_cx| {
358                if let Some(cx) = maybe_cx {
359                    if self.take_core {
360                        let core = cx.worker.core.take();
361
362                        if core.is_some() {
363                            cx.worker.handle.shared.worker_metrics[cx.worker.index]
364                                .set_thread_id(thread::current().id());
365                        }
366
367                        let mut cx_core = cx.core.borrow_mut();
368                        assert!(cx_core.is_none());
369                        *cx_core = core;
370                    }
371
372                    // Reset the task budget as we are re-entering the
373                    // runtime.
374                    coop::set(self.budget);
375                }
376            });
377        }
378    }
379
380    let mut had_entered = false;
381    let mut take_core = false;
382
383    let setup_result = with_current(|maybe_cx| {
384        match (
385            crate::runtime::context::current_enter_context(),
386            maybe_cx.is_some(),
387        ) {
388            (context::EnterRuntime::Entered { .. }, true) => {
389                // We are on a thread pool runtime thread, so we just need to
390                // set up blocking.
391                had_entered = true;
392            }
393            (
394                context::EnterRuntime::Entered {
395                    allow_block_in_place,
396                },
397                false,
398            ) => {
399                // We are on an executor, but _not_ on the thread pool.  That is
400                // _only_ okay if we are in a thread pool runtime's block_on
401                // method:
402                if allow_block_in_place {
403                    had_entered = true;
404                    return Ok(());
405                } else {
406                    // This probably means we are on the current_thread runtime or in a
407                    // LocalSet, where it is _not_ okay to block.
408                    return Err(
409                        "can call blocking only when running on the multi-threaded runtime",
410                    );
411                }
412            }
413            (context::EnterRuntime::NotEntered, true) => {
414                // This is a nested call to block_in_place (we already exited).
415                // All the necessary setup has already been done.
416                return Ok(());
417            }
418            (context::EnterRuntime::NotEntered, false) => {
419                // We are outside of the tokio runtime, so blocking is fine.
420                // We can also skip all of the thread pool blocking setup steps.
421                return Ok(());
422            }
423        }
424
425        let cx = maybe_cx.expect("no .is_some() == false cases above should lead here");
426
427        // Get the worker core. If none is set, then blocking is fine!
428        let mut core = match cx.core.borrow_mut().take() {
429            Some(core) => core,
430            None => return Ok(()),
431        };
432
433        // If we heavily call `spawn_blocking`, there might be no available thread to
434        // run this core. Except for the task in the lifo_slot, all tasks can be
435        // stolen, so we move the task out of the lifo_slot to the run_queue.
436        if let Some(task) = core.lifo_slot.take() {
437            core.run_queue
438                .push_back_or_overflow(task, &*cx.worker.handle, &mut core.stats);
439        }
440
441        // We are taking the core from the context and sending it to another
442        // thread.
443        take_core = true;
444
445        // The parker should be set here
446        assert!(core.park.is_some());
447
448        // In order to block, the core must be sent to another thread for
449        // execution.
450        //
451        // First, move the core back into the worker's shared core slot.
452        cx.worker.core.set(core);
453
454        // Next, clone the worker handle and send it to a new thread for
455        // processing.
456        //
457        // Once the blocking task is done executing, we will attempt to
458        // steal the core back.
459        let worker = cx.worker.clone();
460        runtime::spawn_blocking(move || run(worker));
461        Ok(())
462    });
463
464    if let Err(panic_message) = setup_result {
465        panic!("{}", panic_message);
466    }
467
468    if had_entered {
469        // Unset the current task's budget. Blocking sections are not
470        // constrained by task budgets.
471        let _reset = Reset {
472            take_core,
473            budget: coop::stop(),
474        };
475
476        crate::runtime::context::exit_runtime(f)
477    } else {
478        f()
479    }
480}
481
482impl Launch {
483    pub(crate) fn launch(mut self) {
484        for worker in self.0.drain(..) {
485            runtime::spawn_blocking(move || run(worker));
486        }
487    }
488}
489
490fn run(worker: Arc<Worker>) {
491    #[allow(dead_code)]
492    struct AbortOnPanic;
493
494    impl Drop for AbortOnPanic {
495        fn drop(&mut self) {
496            if std::thread::panicking() {
497                eprintln!("worker thread panicking; aborting process");
498                std::process::abort();
499            }
500        }
501    }
502
503    // Catching panics on worker threads in tests is quite tricky. Instead, when
504    // debug assertions are enabled, we just abort the process.
505    #[cfg(debug_assertions)]
506    let _abort_on_panic = AbortOnPanic;
507
508    // Acquire a core. If this fails, then another thread is running this
509    // worker and there is nothing further to do.
510    let core = match worker.core.take() {
511        Some(core) => core,
512        None => return,
513    };
514
515    worker.handle.shared.worker_metrics[worker.index].set_thread_id(thread::current().id());
516
517    let handle = scheduler::Handle::MultiThread(worker.handle.clone());
518
519    crate::runtime::context::enter_runtime(&handle, true, |_| {
520        // Set the worker context.
521        let cx = scheduler::Context::MultiThread(Context {
522            worker,
523            core: RefCell::new(None),
524            defer: Defer::new(),
525        });
526
527        context::set_scheduler(&cx, || {
528            let cx = cx.expect_multi_thread();
529
530            // This should always be an error. It only returns a `Result` to support
531            // using `?` to short circuit.
532            assert!(cx.run(core).is_err());
533
534            // Check if there are any deferred tasks to notify. This can happen when
535            // the worker core is lost due to `block_in_place()` being called from
536            // within the task.
537            cx.defer.wake();
538        });
539    });
540}
541
542impl Context {
543    fn run(&self, mut core: Box<Core>) -> RunResult {
544        // Reset `lifo_enabled` here in case the core was previously stolen from
545        // a task that had the LIFO slot disabled.
546        self.reset_lifo_enabled(&mut core);
547
        // Start in the "processing tasks" state, as polling tasks from the
        // local queue will be one of the first things we do.
550        core.stats.start_processing_scheduled_tasks();
551
552        while !core.is_shutdown {
553            self.assert_lifo_enabled_is_correct(&core);
554
555            if core.is_traced {
556                core = self.worker.handle.trace_core(core);
557            }
558
559            // Increment the tick
560            core.tick();
561
562            // Run maintenance, if needed
563            core = self.maintenance(core);
564
            // First, check for work available to the current worker.
566            if let Some(task) = core.next_task(&self.worker) {
567                core = self.run_task(task, core)?;
568                continue;
569            }
570
571            // We consumed all work in the queues and will start searching for work.
572            core.stats.end_processing_scheduled_tasks();
573
574            // There is no more **local** work to process, try to steal work
575            // from other workers.
576            if let Some(task) = core.steal_work(&self.worker) {
577                // Found work, switch back to processing
578                core.stats.start_processing_scheduled_tasks();
579                core = self.run_task(task, core)?;
580            } else {
581                // Wait for work
582                core = if !self.defer.is_empty() {
583                    self.park_yield(core)
584                } else {
585                    self.park(core)
586                };
587                core.stats.start_processing_scheduled_tasks();
588            }
589        }
590
591        #[cfg(all(tokio_unstable, feature = "time"))]
592        {
593            match self.worker.handle.timer_flavor {
594                TimerFlavor::Traditional => {}
595                TimerFlavor::Alternative => {
596                    util::time_alt::shutdown_local_timers(
597                        &mut core.time_context.wheel,
598                        &mut core.time_context.canc_rx,
599                        self.worker.handle.take_remote_timers(),
600                        &self.worker.handle.driver,
601                    );
602                }
603            }
604        }
605
606        core.pre_shutdown(&self.worker);
607        // Signal shutdown
608        self.worker.handle.shutdown_core(core);
609        Err(())
610    }
611
612    fn run_task(&self, task: Notified, mut core: Box<Core>) -> RunResult {
613        #[cfg(tokio_unstable)]
614        let task_meta = task.task_meta();
615
616        let task = self.worker.handle.shared.owned.assert_owner(task);
617
618        // Make sure the worker is not in the **searching** state. This enables
619        // another idle worker to try to steal work.
620        core.transition_from_searching(&self.worker);
621
622        self.assert_lifo_enabled_is_correct(&core);
623
624        // Measure the poll start time. Note that we may end up polling other
625        // tasks under this measurement. In this case, the tasks came from the
626        // LIFO slot and are considered part of the current task for scheduling
        // purposes. These tasks inherit the "parent"'s limits.
628        core.stats.start_poll();
629
630        // Make the core available to the runtime context
631        *self.core.borrow_mut() = Some(core);
632
633        // Run the task
634        coop::budget(|| {
            // Unlike the poll time above, the poll start callback is attached to the task id,
636            // so it is tightly associated with the actual poll invocation.
637            #[cfg(tokio_unstable)]
638            self.worker
639                .handle
640                .task_hooks
641                .poll_start_callback(&task_meta);
642
643            task.run();
644
645            #[cfg(tokio_unstable)]
646            self.worker.handle.task_hooks.poll_stop_callback(&task_meta);
647
648            let mut lifo_polls = 0;
649
650            // As long as there is budget remaining and a task exists in the
651            // `lifo_slot`, then keep running.
652            loop {
653                // Check if we still have the core. If not, the core was stolen
654                // by another worker.
655                let mut core = match self.core.borrow_mut().take() {
656                    Some(core) => core,
657                    None => {
658                        // In this case, we cannot call `reset_lifo_enabled()`
659                        // because the core was stolen. The stealer will handle
660                        // that at the top of `Context::run`
661                        return Err(());
662                    }
663                };
664
665                // Check for a task in the LIFO slot
666                let task = match core.lifo_slot.take() {
667                    Some(task) => task,
668                    None => {
669                        self.reset_lifo_enabled(&mut core);
670                        core.stats.end_poll();
671                        return Ok(core);
672                    }
673                };
674
675                if !coop::has_budget_remaining() {
676                    core.stats.end_poll();
677
678                    // Not enough budget left to run the LIFO task, push it to
679                    // the back of the queue and return.
680                    core.run_queue.push_back_or_overflow(
681                        task,
682                        &*self.worker.handle,
683                        &mut core.stats,
684                    );
685                    // If we hit this point, the LIFO slot should be enabled.
686                    // There is no need to reset it.
687                    debug_assert!(core.lifo_enabled);
688                    return Ok(core);
689                }
690
691                // Track that we are about to run a task from the LIFO slot.
692                lifo_polls += 1;
693                super::counters::inc_lifo_schedules();
694
695                // Disable the LIFO slot if we reach our limit
696                //
                // In ping-pong style workloads where task A notifies task B,
698                // which notifies task A again, continuously prioritizing the
699                // LIFO slot can cause starvation as these two tasks will
700                // repeatedly schedule the other. To mitigate this, we limit the
701                // number of times the LIFO slot is prioritized.
702                if lifo_polls >= MAX_LIFO_POLLS_PER_TICK {
703                    core.lifo_enabled = false;
704                    super::counters::inc_lifo_capped();
705                }
706
707                // Run the LIFO task, then loop
708                *self.core.borrow_mut() = Some(core);
709                let task = self.worker.handle.shared.owned.assert_owner(task);
710
711                #[cfg(tokio_unstable)]
712                let task_meta = task.task_meta();
713
714                #[cfg(tokio_unstable)]
715                self.worker
716                    .handle
717                    .task_hooks
718                    .poll_start_callback(&task_meta);
719
720                task.run();
721
722                #[cfg(tokio_unstable)]
723                self.worker.handle.task_hooks.poll_stop_callback(&task_meta);
724            }
725        })
726    }
727
728    fn reset_lifo_enabled(&self, core: &mut Core) {
729        core.lifo_enabled = !self.worker.handle.shared.config.disable_lifo_slot;
730    }
731
732    fn assert_lifo_enabled_is_correct(&self, core: &Core) {
733        debug_assert_eq!(
734            core.lifo_enabled,
735            !self.worker.handle.shared.config.disable_lifo_slot
736        );
737    }
738
739    fn maintenance(&self, mut core: Box<Core>) -> Box<Core> {
740        if core.tick % self.worker.handle.shared.config.event_interval == 0 {
741            super::counters::inc_num_maintenance();
742
743            core.stats.end_processing_scheduled_tasks();
744
745            // Call `park` with a 0 timeout. This enables the I/O driver, timer, ...
746            // to run without actually putting the thread to sleep.
747            core = self.park_yield(core);
748
749            // Run regularly scheduled maintenance
750            core.maintenance(&self.worker);
751
752            core.stats.start_processing_scheduled_tasks();
753        }
754
755        core
756    }
757
758    /// Parks the worker thread while waiting for tasks to execute.
759    ///
    /// This function checks that there is indeed no more work left to do before parking.
    /// Note that, before parking, the worker thread will try to take ownership of the
    /// driver (I/O, time) and dispatch any events that might have fired.
    /// Whenever a worker thread runs the driver loop, all woken tasks are scheduled in
    /// its own local queue until the queue saturates (ntasks > `LOCAL_QUEUE_CAPACITY`).
    /// When the local queue is saturated, the overflow tasks are added to the injection
    /// queue, from where other workers can pick them up.
    /// Finally, we rely on the work-stealing algorithm to spread the tasks amongst the
    /// workers after all the I/O events have been dispatched.
769    fn park(&self, mut core: Box<Core>) -> Box<Core> {
770        if let Some(f) = &self.worker.handle.shared.config.before_park {
771            f();
772        }
773
774        if core.transition_to_parked(&self.worker) {
775            while !core.is_shutdown && !core.is_traced {
776                core.stats.about_to_park();
777                core.stats
778                    .submit(&self.worker.handle.shared.worker_metrics[self.worker.index]);
779
780                core = self.park_internal(core, None);
781
782                core.stats.unparked();
783
784                // Run regularly scheduled maintenance
785                core.maintenance(&self.worker);
786
787                if core.transition_from_parked(&self.worker) {
788                    break;
789                }
790            }
791        }
792
793        if let Some(f) = &self.worker.handle.shared.config.after_unpark {
794            f();
795        }
796        core
797    }
798
799    fn park_yield(&self, core: Box<Core>) -> Box<Core> {
800        self.park_internal(core, Some(Duration::from_millis(0)))
801    }
802
803    fn park_internal(&self, mut core: Box<Core>, duration: Option<Duration>) -> Box<Core> {
804        self.assert_lifo_enabled_is_correct(&core);
805
806        // Take the parker out of core
807        let mut park = core.park.take().expect("park missing");
808        // Store `core` in context
809        *self.core.borrow_mut() = Some(core);
810
811        #[cfg(feature = "time")]
812        let (duration, auto_advance_duration) = match self.worker.handle.timer_flavor {
813            TimerFlavor::Traditional => (duration, None::<Duration>),
814            #[cfg(tokio_unstable)]
815            TimerFlavor::Alternative => {
                // Must happen after taking the parker out, as `Handle::schedule_local`
                // will delay the notification if the parker has been taken out.
818                //
819                // See comments in `Handle::schedule_local` for more details.
820                let MaintainLocalTimer {
821                    park_duration: duration,
822                    auto_advance_duration,
823                } = self.maintain_local_timers_before_parking(duration);
824                (duration, auto_advance_duration)
825            }
826        };
827
828        // Park thread
829        if let Some(timeout) = duration {
830            park.park_timeout(&self.worker.handle.driver, timeout);
831        } else {
832            park.park(&self.worker.handle.driver);
833        }
834
835        self.defer.wake();
836
837        #[cfg(feature = "time")]
838        match self.worker.handle.timer_flavor {
839            TimerFlavor::Traditional => {
840                // suppress unused variable warning
841                let _ = auto_advance_duration;
842            }
843            #[cfg(tokio_unstable)]
844            TimerFlavor::Alternative => {
                // Must happen before placing the parker back, as `Handle::schedule_local`
                // will delay the notification if the parker is still in `core`.
847                //
848                // See comments in `Handle::schedule_local` for more details.
849                self.maintain_local_timers_after_parking(auto_advance_duration);
850            }
851        }
852
853        // Remove `core` from context
854        core = self.core.borrow_mut().take().expect("core missing");
855
856        // Place `park` back in `core`
857        core.park = Some(park);
858        if core.should_notify_others() {
859            self.worker.handle.notify_parked_local();
860        }
861        core
862    }
863
864    pub(crate) fn defer(&self, waker: &Waker) {
865        if self.core.borrow().is_none() {
866            // If there is no core, then the worker is currently in a block_in_place. In this case,
867            // we cannot use the defer queue as we aren't really in the current runtime.
868            waker.wake_by_ref();
869        } else {
870            self.defer.defer(waker);
871        }
872    }
873
874    #[cfg(all(tokio_unstable, feature = "time"))]
875    /// Maintain local timers before parking the resource driver.
876    ///
877    /// * Remove cancelled timers from the local timer wheel.
878    /// * Register remote timers to the local timer wheel.
879    /// * Adjust the park duration based on
880    ///   * the next timer expiration time.
881    ///   * whether auto-advancing is required (feature = "test-util").
882    ///
    /// # Returns
    ///
    /// A [`MaintainLocalTimer`] containing the adjusted park duration and, if
    /// auto-advancing is required, the auto-advance duration.
886    fn maintain_local_timers_before_parking(
887        &self,
888        park_duration: Option<Duration>,
889    ) -> MaintainLocalTimer {
890        let handle = &self.worker.handle;
891        let mut wake_queue = time_alt::WakeQueue::new();
892
893        let (should_yield, next_timer) = with_current(|maybe_cx| {
894            let cx = maybe_cx.expect("function should be called when core is present");
895            assert_eq!(
896                Arc::as_ptr(&cx.worker.handle),
897                Arc::as_ptr(&self.worker.handle),
898                "function should be called on the exact same worker"
899            );
900
901            let mut maybe_core = cx.core.borrow_mut();
902            let core = maybe_core.as_mut().expect("core missing");
903            let time_cx = &mut core.time_context;
904
905            util::time_alt::process_registration_queue(
906                &mut time_cx.registration_queue,
907                &mut time_cx.wheel,
908                &time_cx.canc_tx,
909                &mut wake_queue,
910            );
911            util::time_alt::insert_inject_timers(
912                &mut time_cx.wheel,
913                &time_cx.canc_tx,
914                handle.take_remote_timers(),
915                &mut wake_queue,
916            );
917            util::time_alt::remove_cancelled_timers(&mut time_cx.wheel, &mut time_cx.canc_rx);
918            let should_yield = !wake_queue.is_empty();
919
920            let next_timer = util::time_alt::next_expiration_time(&time_cx.wheel, &handle.driver);
921
922            (should_yield, next_timer)
923        });
924
925        wake_queue.wake_all();
926
927        if should_yield {
928            MaintainLocalTimer {
929                park_duration: Some(Duration::from_millis(0)),
930                auto_advance_duration: None,
931            }
932        } else {
            // Get the minimum of the requested park duration and the next
            // timer expiration.
934            let dur = util::time_alt::min_duration(park_duration, next_timer);
935            if util::time_alt::pre_auto_advance(&handle.driver, dur) {
936                MaintainLocalTimer {
937                    park_duration: Some(Duration::ZERO),
938                    auto_advance_duration: dur,
939                }
940            } else {
941                MaintainLocalTimer {
942                    park_duration: dur,
943                    auto_advance_duration: None,
944                }
945            }
946        }
947    }
948
949    #[cfg(all(tokio_unstable, feature = "time"))]
950    /// Maintain local timers after unparking the resource driver.
951    ///
952    /// * Auto-advance time, if required (feature = "test-util").
953    /// * Process expired timers.
954    fn maintain_local_timers_after_parking(&self, auto_advance_duration: Option<Duration>) {
955        let handle = &self.worker.handle;
956        let mut wake_queue = time_alt::WakeQueue::new();
957
958        with_current(|maybe_cx| {
959            let cx = maybe_cx.expect("function should be called when core is present");
960            assert_eq!(
961                Arc::as_ptr(&cx.worker.handle),
962                Arc::as_ptr(&self.worker.handle),
963                "function should be called on the exact same worker"
964            );
965
966            let mut maybe_core = cx.core.borrow_mut();
967            let core = maybe_core.as_mut().expect("core missing");
968            let time_cx = &mut core.time_context;
969
970            util::time_alt::post_auto_advance(&handle.driver, auto_advance_duration);
971            util::time_alt::process_expired_timers(
972                &mut time_cx.wheel,
973                &handle.driver,
974                &mut wake_queue,
975            );
976        });
977
978        wake_queue.wake_all();
979    }
980
981    #[cfg(all(tokio_unstable, feature = "time"))]
982    fn with_core<F, R>(&self, f: F) -> R
983    where
984        F: FnOnce(Option<&mut Core>) -> R,
985    {
986        match self.core.borrow_mut().as_mut() {
987            Some(core) => f(Some(core)),
988            None => f(None),
989        }
990    }
991
992    #[cfg(all(tokio_unstable, feature = "time"))]
993    pub(crate) fn with_time_temp_local_context<F, R>(&self, f: F) -> R
994    where
995        F: FnOnce(Option<time_alt::TempLocalContext<'_>>) -> R,
996    {
997        self.with_core(|maybe_core| match maybe_core {
998            Some(core) if core.is_shutdown => f(Some(time_alt::TempLocalContext::new_shutdown())),
999            Some(core) => f(Some(time_alt::TempLocalContext::new_running(
1000                &mut core.time_context,
1001            ))),
1002            None => f(None),
1003        })
1004    }
1005}
1006
1007impl Core {
1008    /// Increment the tick
1009    fn tick(&mut self) {
1010        self.tick = self.tick.wrapping_add(1);
1011    }
1012
1013    /// Return the next notified task available to this worker.
1014    fn next_task(&mut self, worker: &Worker) -> Option<Notified> {
1015        if self.tick % self.global_queue_interval == 0 {
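            // Periodically prefer the global (inject) queue over the local
            // queue so tasks scheduled from outside the workers are not
            // starved by a steady stream of locally scheduled work.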
1016            // Update the global queue interval, if needed
1017            self.tune_global_queue_interval(worker);
1018
1019            worker
1020                .handle
1021                .next_remote_task()
1022                .or_else(|| self.next_local_task())
1023        } else {
1024            let maybe_task = self.next_local_task();
1025
1026            if maybe_task.is_some() {
1027                return maybe_task;
1028            }
1029
1030            if worker.inject().is_empty() {
1031                return None;
1032            }
1033
1034            // Other threads can only **remove** tasks from the current worker's
1035            // `run_queue`. So, we can be confident that by the time we call
1036            // `run_queue.push_back` below, there will be *at least* `cap`
1037            // available slots in the queue.
1038            let cap = usize::min(
1039                self.run_queue.remaining_slots(),
1040                self.run_queue.max_capacity() / 2,
1041            );
1042
1043            // The worker is currently idle, pull a batch of work from the
1044            // injection queue. We don't want to pull *all* the work so other
1045            // workers can also get some.
1046            let n = usize::min(
1047                worker.inject().len() / worker.handle.shared.remotes.len() + 1,
1048                cap,
1049            );
1050
1051            // Take at least one task since the first task is returned directly
1052            // and not pushed onto the local queue.
1053            let n = usize::max(1, n);
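            // For example, with 8 workers and 17 tasks in the inject queue,
            // this pulls 17 / 8 + 1 = 3 tasks (bounded below by 1 and above
            // by `cap`).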
1054
1055            let mut synced = worker.handle.shared.synced.lock();
1056            // safety: passing in the correct `inject::Synced`.
1057            let mut tasks = unsafe { worker.inject().pop_n(&mut synced.inject, n) };
1058
1059            // Pop the first task to return immediately
1060            let ret = tasks.next();
1061
            // Push the rest of the tasks onto the run queue
1063            self.run_queue.push_back(tasks);
1064
1065            ret
1066        }
1067    }
1068
1069    fn next_local_task(&mut self) -> Option<Notified> {
1070        self.lifo_slot.take().or_else(|| self.run_queue.pop())
1071    }
1072
    /// Function responsible for stealing tasks from another worker.
    ///
    /// Note: a worker will only attempt to steal if fewer than half of the
    /// workers are already searching for tasks to steal. The idea is to make
    /// sure not all workers are trying to steal at the same time.
1078    fn steal_work(&mut self, worker: &Worker) -> Option<Notified> {
1079        if !self.transition_to_searching(worker) {
1080            return None;
1081        }
1082
1083        let num = worker.handle.shared.remotes.len();
1084        // Start from a random worker
1085        let start = self.rand.fastrand_n(num as u32) as usize;
1086
1087        for i in 0..num {
1088            let i = (start + i) % num;
1089
1090            // Don't steal from ourself! We know we don't have work.
1091            if i == worker.index {
1092                continue;
1093            }
1094
1095            let target = &worker.handle.shared.remotes[i];
1096            if let Some(task) = target
1097                .steal
1098                .steal_into(&mut self.run_queue, &mut self.stats)
1099            {
1100                return Some(task);
1101            }
1102        }
1103
1104        // Fallback on checking the global queue
1105        worker.handle.next_remote_task()
1106    }
1107
1108    fn transition_to_searching(&mut self, worker: &Worker) -> bool {
1109        if !self.is_searching {
1110            self.is_searching = worker.handle.shared.idle.transition_worker_to_searching();
1111        }
1112
1113        self.is_searching
1114    }
1115
1116    fn transition_from_searching(&mut self, worker: &Worker) {
1117        if !self.is_searching {
1118            return;
1119        }
1120
1121        self.is_searching = false;
1122        worker.handle.transition_worker_from_searching();
1123    }
1124
1125    fn has_tasks(&self) -> bool {
1126        self.lifo_slot.is_some() || self.run_queue.has_tasks()
1127    }
1128
1129    fn should_notify_others(&self) -> bool {
1130        // If there are tasks available to steal, but this worker is not
1131        // looking for tasks to steal, notify another worker.
1132        if self.is_searching {
1133            return false;
1134        }
1135        self.lifo_slot.is_some() as usize + self.run_queue.len() > 1
1136    }
1137
1138    /// Prepares the worker state for parking.
1139    ///
1140    /// Returns true if the transition happened, false if there is work to do first.
1141    fn transition_to_parked(&mut self, worker: &Worker) -> bool {
1142        // Workers should not park if they have work to do
1143        if self.has_tasks() || self.is_traced {
1144            return false;
1145        }
1146
1147        // When the final worker transitions **out** of searching to parked, it
1148        // must check all the queues one last time in case work materialized
1149        // between the last work scan and transitioning out of searching.
1150        let is_last_searcher = worker.handle.shared.idle.transition_worker_to_parked(
1151            &worker.handle.shared,
1152            worker.index,
1153            self.is_searching,
1154        );
1155
        // The worker is no longer searching. Setting this only updates the
        // local cache.
1158        self.is_searching = false;
1159
1160        if is_last_searcher {
1161            worker.handle.notify_if_work_pending();
1162        }
1163
1164        true
1165    }
1166
1167    /// Returns `true` if the transition happened.
1168    fn transition_from_parked(&mut self, worker: &Worker) -> bool {
1169        // If a task is in the lifo slot/run queue, then we must unpark regardless of
1170        // being notified
1171        if self.has_tasks() {
1172            // When a worker wakes, it should only transition to the "searching"
1173            // state when the wake originates from another worker *or* a new task
1174            // is pushed. We do *not* want the worker to transition to "searching"
1175            // when it wakes when the I/O driver receives new events.
1176            self.is_searching = !worker
1177                .handle
1178                .shared
1179                .idle
1180                .unpark_worker_by_id(&worker.handle.shared, worker.index);
1181            return true;
1182        }
1183
1184        if worker
1185            .handle
1186            .shared
1187            .idle
1188            .is_parked(&worker.handle.shared, worker.index)
1189        {
1190            return false;
1191        }
1192
1193        // When unparked, the worker is in the searching state.
1194        self.is_searching = true;
1195        true
1196    }
1197
1198    /// Runs maintenance work such as checking the pool's state.
1199    fn maintenance(&mut self, worker: &Worker) {
1200        self.stats
1201            .submit(&worker.handle.shared.worker_metrics[worker.index]);
1202
1203        if !self.is_shutdown {
            // Check if the scheduler has been shut down
1205            let synced = worker.handle.shared.synced.lock();
1206            self.is_shutdown = worker.inject().is_closed(&synced.inject);
1207        }
1208
1209        if !self.is_traced {
1210            // Check if the worker should be tracing.
1211            self.is_traced = worker.handle.shared.trace_status.trace_requested();
1212        }
1213    }
1214
1215    /// Signals all tasks to shut down, and waits for them to complete. Must run
1216    /// before we enter the single-threaded phase of shutdown processing.
1217    fn pre_shutdown(&mut self, worker: &Worker) {
1218        // Start from a random inner list
1219        let start = self
1220            .rand
1221            .fastrand_n(worker.handle.shared.owned.get_shard_size() as u32);
1222        // Signal to all tasks to shut down.
1223        worker
1224            .handle
1225            .shared
1226            .owned
1227            .close_and_shutdown_all(start as usize);
1228
1229        self.stats
1230            .submit(&worker.handle.shared.worker_metrics[worker.index]);
1231    }
1232
1233    /// Shuts down the core.
1234    fn shutdown(&mut self, handle: &Handle) {
        // Take the parker
1236        let mut park = self.park.take().expect("park missing");
1237
1238        // Drain the queue
1239        while self.next_local_task().is_some() {}
1240
1241        park.shutdown(&handle.driver);
1242    }
1243
1244    fn tune_global_queue_interval(&mut self, worker: &Worker) {
1245        let next = self
1246            .stats
1247            .tuned_global_queue_interval(&worker.handle.shared.config);
1248
1249        // Smooth out jitter
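        // For example, moving from an interval of 31 to 33 is ignored, while
        // moving from 31 to 34 is applied.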
1250        if u32::abs_diff(self.global_queue_interval, next) > 2 {
1251            self.global_queue_interval = next;
1252        }
1253    }
1254}
1255
1256impl Worker {
1257    /// Returns a reference to the scheduler's injection queue.
1258    fn inject(&self) -> &inject::Shared<Arc<Handle>> {
1259        &self.handle.shared.inject
1260    }
1261}
1262
1263impl Handle {
1264    pub(super) fn schedule_task(&self, task: Notified, is_yield: bool) {
1265        with_current(|maybe_cx| {
1266            if let Some(cx) = maybe_cx {
1267                // Make sure the task is part of the **current** scheduler.
1268                if self.ptr_eq(&cx.worker.handle) {
1269                    // And the current thread still holds a core
1270                    if let Some(core) = cx.core.borrow_mut().as_mut() {
1271                        self.schedule_local(core, task, is_yield);
1272                        return;
1273                    }
1274                }
1275            }
1276
1277            // Otherwise, use the inject queue.
1278            self.push_remote_task(task);
1279            self.notify_parked_remote();
1280        });
1281    }
1282
1283    pub(super) fn schedule_option_task_without_yield(&self, task: Option<Notified>) {
1284        if let Some(task) = task {
1285            self.schedule_task(task, false);
1286        }
1287    }
1288
1289    fn schedule_local(&self, core: &mut Core, task: Notified, is_yield: bool) {
1290        core.stats.inc_local_schedule_count();
1291
1292        // Spawning from the worker thread. If scheduling a "yield" then the
1293        // task must always be pushed to the back of the queue, enabling other
1294        // tasks to be executed. If **not** a yield, then there is more
1295        // flexibility and the task may go to the front of the queue.
1296        let should_notify = if is_yield || !core.lifo_enabled {
1297            core.run_queue
1298                .push_back_or_overflow(task, self, &mut core.stats);
1299            true
1300        } else {
1301            // Push to the LIFO slot
1302            let prev = core.lifo_slot.take();
1303            let ret = prev.is_some();
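            // Only notify another worker if a task was displaced from the
            // LIFO slot into the run queue: that task is now stealable. If
            // the slot was empty, this worker will poll the new task itself
            // shortly, so no wakeup is needed.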
1304
1305            if let Some(prev) = prev {
1306                core.run_queue
1307                    .push_back_or_overflow(prev, self, &mut core.stats);
1308            }
1309
1310            core.lifo_slot = Some(task);
1311
1312            ret
1313        };
1314
1315        // Only notify if not currently parked. If `park` is `None`, then the
1316        // scheduling is from a resource driver. As notifications often come in
1317        // batches, the notification is delayed until the park is complete.
1318        if should_notify && core.park.is_some() {
1319            self.notify_parked_local();
1320        }
1321    }
1322
1323    fn next_remote_task(&self) -> Option<Notified> {
1324        if self.shared.inject.is_empty() {
1325            return None;
1326        }
1327
1328        let mut synced = self.shared.synced.lock();
        // safety: passing in the correct `inject::Synced`
1330        unsafe { self.shared.inject.pop(&mut synced.inject) }
1331    }
1332
1333    fn push_remote_task(&self, task: Notified) {
1334        self.shared.scheduler_metrics.inc_remote_schedule_count();
1335
1336        let mut synced = self.shared.synced.lock();
        // safety: passing in the correct `inject::Synced`
1338        unsafe {
1339            self.shared.inject.push(&mut synced.inject, task);
1340        }
1341    }
1342
1343    #[cfg(all(tokio_unstable, feature = "time"))]
1344    pub(crate) fn push_remote_timer(&self, hdl: time_alt::EntryHandle) {
1345        assert_eq!(self.timer_flavor, TimerFlavor::Alternative);
1346        {
1347            let mut synced = self.shared.synced.lock();
1348            synced.inject_timers.push(hdl);
1349        }
1350        self.notify_parked_remote();
1351    }
1352
1353    #[cfg(all(tokio_unstable, feature = "time"))]
1354    pub(crate) fn take_remote_timers(&self) -> Vec<time_alt::EntryHandle> {
1355        assert_eq!(self.timer_flavor, TimerFlavor::Alternative);
        // It's ok to lose the race, as another worker is
        // draining the `inject_timers`.
1358        match self.shared.synced.try_lock() {
1359            Some(mut synced) => std::mem::take(&mut synced.inject_timers),
1360            None => Vec::new(),
1361        }
1362    }
1363
1364    pub(super) fn close(&self) {
1365        if self
1366            .shared
1367            .inject
1368            .close(&mut self.shared.synced.lock().inject)
1369        {
1370            self.notify_all();
1371        }
1372    }
1373
1374    fn notify_parked_local(&self) {
1375        super::counters::inc_num_inc_notify_local();
1376
1377        if let Some(index) = self.shared.idle.worker_to_notify(&self.shared) {
1378            super::counters::inc_num_unparks_local();
1379            self.shared.remotes[index].unpark.unpark(&self.driver);
1380        }
1381    }
1382
1383    fn notify_parked_remote(&self) {
1384        if let Some(index) = self.shared.idle.worker_to_notify(&self.shared) {
1385            self.shared.remotes[index].unpark.unpark(&self.driver);
1386        }
1387    }
1388
1389    pub(super) fn notify_all(&self) {
1390        for remote in &self.shared.remotes[..] {
1391            remote.unpark.unpark(&self.driver);
1392        }
1393    }
1394
1395    fn notify_if_work_pending(&self) {
1396        for remote in &self.shared.remotes[..] {
1397            if !remote.steal.is_empty() {
1398                self.notify_parked_local();
1399                return;
1400            }
1401        }
1402
1403        if !self.shared.inject.is_empty() {
1404            self.notify_parked_local();
1405        }
1406    }
1407
1408    fn transition_worker_from_searching(&self) {
1409        if self.shared.idle.transition_worker_from_searching() {
1410            // We are the final searching worker. Because work was found, we
1411            // need to notify another worker.
1412            self.notify_parked_local();
1413        }
1414    }
1415
    /// Signals that a worker has observed the shutdown signal and has pushed
    /// its core to `Shared::shutdown_cores`.
1418    ///
1419    /// If all workers have reached this point, the final cleanup is performed.
1420    fn shutdown_core(&self, core: Box<Core>) {
1421        let mut cores = self.shared.shutdown_cores.lock();
1422        cores.push(core);
1423
1424        if cores.len() != self.shared.remotes.len() {
1425            return;
1426        }
1427
1428        debug_assert!(self.shared.owned.is_empty());
1429
1430        for mut core in cores.drain(..) {
1431            core.shutdown(self);
1432        }
1433
1434        // Drain the injection queue
1435        //
1436        // We already shut down every task, so we can simply drop the tasks.
1437        while let Some(task) = self.next_remote_task() {
1438            drop(task);
1439        }
1440    }
1441
1442    fn ptr_eq(&self, other: &Handle) -> bool {
1443        std::ptr::eq(self, other)
1444    }
1445}
1446
1447impl Overflow<Arc<Handle>> for Handle {
1448    fn push(&self, task: task::Notified<Arc<Handle>>) {
1449        self.push_remote_task(task);
1450    }
1451
1452    fn push_batch<I>(&self, iter: I)
1453    where
1454        I: Iterator<Item = task::Notified<Arc<Handle>>>,
1455    {
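        // safety: `&Handle` implements `Lock<inject::Synced>` (see below) by
        // locking the scheduler mutex, so the correct `inject::Synced` is
        // passed in.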
1456        unsafe {
1457            self.shared.inject.push_batch(self, iter);
1458        }
1459    }
1460}
1461
1462pub(crate) struct InjectGuard<'a> {
1463    lock: crate::loom::sync::MutexGuard<'a, Synced>,
1464}
1465
1466impl<'a> AsMut<inject::Synced> for InjectGuard<'a> {
1467    fn as_mut(&mut self) -> &mut inject::Synced {
1468        &mut self.lock.inject
1469    }
1470}
1471
1472impl<'a> Lock<inject::Synced> for &'a Handle {
1473    type Handle = InjectGuard<'a>;
1474
1475    fn lock(self) -> Self::Handle {
1476        InjectGuard {
1477            lock: self.shared.synced.lock(),
1478        }
1479    }
1480}
1481
1482#[cfg(all(tokio_unstable, feature = "time"))]
1483/// Returned by [`Context::maintain_local_timers_before_parking`].
1484struct MaintainLocalTimer {
1485    park_duration: Option<Duration>,
1486    auto_advance_duration: Option<Duration>,
1487}
1488
1489#[track_caller]
1490fn with_current<R>(f: impl FnOnce(Option<&Context>) -> R) -> R {
1491    use scheduler::Context::MultiThread;
1492
1493    context::with_scheduler(|ctx| match ctx {
1494        Some(MultiThread(ctx)) => f(Some(ctx)),
1495        _ => f(None),
1496    })
1497}