use crate::loom::sync::{Arc, Mutex};
use crate::runtime;
use crate::runtime::scheduler::multi_thread::{
    idle, queue, Counters, Handle, Idle, Overflow, Parker, Stats, TraceStatus, Unparker,
};
use crate::runtime::scheduler::{inject, Defer, Lock};
use crate::runtime::task::OwnedTasks;
use crate::runtime::{
    blocking, driver, scheduler, task, Config, SchedulerMetrics, TimerFlavor, WorkerMetrics,
};
use crate::runtime::{context, TaskHooks};
use crate::task::coop;
use crate::util::atomic_cell::AtomicCell;
use crate::util::rand::{FastRand, RngSeedGenerator};

use std::cell::RefCell;
use std::task::Waker;
use std::thread;
use std::time::Duration;

mod metrics;

cfg_taskdump! {
    mod taskdump;
}

cfg_not_taskdump! {
    mod taskdump_mock;
}

#[cfg(all(tokio_unstable, feature = "time"))]
use crate::loom::sync::atomic::AtomicBool;

#[cfg(all(tokio_unstable, feature = "time"))]
use crate::runtime::time_alt;

#[cfg(all(tokio_unstable, feature = "time"))]
use crate::runtime::scheduler::util;

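/// A scheduler worker.
///
/// Each worker thread owns one `Worker`. The per-thread state lives in `Core`,
/// which is kept in an `AtomicCell` so it can be handed to a replacement
/// thread when the current one blocks (see `block_in_place`).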
pub(super) struct Worker {
    /// Reference to the scheduler's shared handle.
    handle: Arc<Handle>,

    /// Index of this worker in `Shared::remotes` and `Shared::worker_metrics`.
    index: usize,

    /// Used to hand the core over to another thread when the current one
    /// blocks (see `block_in_place`).
    core: AtomicCell<Core>,
}

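/// Core data owned by the thread currently driving a worker.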
struct Core {
    /// Incremented on every scheduler loop iteration; drives periodic work.
    tick: u32,

    /// Slot holding the task most recently scheduled from this worker; polled
    /// before the run queue for better locality.
    lifo_slot: Option<Notified>,

    /// `false` when the LIFO slot is disabled, either by configuration or
    /// after `MAX_LIFO_POLLS_PER_TICK` consecutive LIFO polls.
    lifo_enabled: bool,

    /// The worker-local run queue.
    run_queue: queue::Local<Arc<Handle>>,

    /// Worker-local timer state used by the alternative timer flavor.
    #[cfg(all(tokio_unstable, feature = "time"))]
    time_context: time_alt::LocalContext,

    /// `true` if the worker is currently searching for more work (stealing).
    is_searching: bool,

    /// `true` when the scheduler has been signalled to shut down.
    is_shutdown: bool,

    /// `true` when a trace (task dump) has been requested for this worker.
    is_traced: bool,

    /// Parker; taken out (`None`) while the thread is actually parked.
    park: Option<Parker>,

    /// Per-worker runtime statistics.
    stats: Stats,

    /// How often to check the global queue, tuned from recent stats.
    global_queue_interval: u32,

    /// Fast (non-cryptographic) random number generator, used e.g. to pick
    /// which sibling worker to steal from.
    rand: FastRand,
}

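/// State shared across all workers of the scheduler.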
pub(crate) struct Shared {
    /// Per-worker remote state: steal handles and unparkers.
    remotes: Box<[Remote]>,

    /// Global task queue, used to submit work from outside the scheduler and
    /// as the overflow target for the local run queues.
    pub(super) inject: inject::Shared<Arc<Handle>>,

    /// Coordinates idle workers.
    idle: Idle,

    /// Collection of all active tasks spawned onto this executor.
    pub(crate) owned: OwnedTasks<Arc<Handle>>,

    /// Data synchronized by the scheduler mutex.
    pub(super) synced: Mutex<Synced>,

    /// Cores that have observed the shutdown signal.
    ///
    /// The core is **not** placed back into its worker to avoid it being
    /// stolen by a thread spawned as part of `block_in_place`.
    #[allow(clippy::vec_box)] // the cores are moved around as already-boxed values
    shutdown_cores: Mutex<Vec<Box<Core>>>,

    /// State shared with the worker threads for task dumps.
    pub(super) trace_status: TraceStatus,

    /// Scheduler configuration options.
    config: Config,

    /// Collects scheduler-level metrics.
    pub(super) scheduler_metrics: SchedulerMetrics,

    pub(super) worker_metrics: Box<[WorkerMetrics]>,

    /// Only held to trigger some code on drop; used for internal counters.
    _counters: Counters,
}

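/// Scheduler state protected by the `Shared::synced` mutex.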
pub(crate) struct Synced {
    /// Synchronized state for `Idle`.
    pub(super) idle: idle::Synced,

    /// Synchronized state for `inject::Shared`.
    pub(crate) inject: inject::Synced,

    /// Timers registered from outside their owning worker, waiting to be moved
    /// onto a worker's local wheel (alternative timer flavor).
    #[cfg(all(tokio_unstable, feature = "time"))]
    inject_timers: Vec<time_alt::EntryHandle>,
}

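/// Handles used by other threads to interact with a worker: stealing work and
/// unparking the thread.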
struct Remote {
    /// Steal half of the worker's local run queue.
    pub(super) steal: queue::Steal<Arc<Handle>>,

    /// Unparks the associated worker thread.
    unpark: Unparker,
}

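/// Thread-local context set for the duration of a worker thread's run.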
pub(crate) struct Context {
    /// The worker this context belongs to.
    worker: Arc<Worker>,

    /// Core data; `None` while the core is checked out (parked or blocking).
    core: RefCell<Option<Box<Core>>>,

    /// Wakers to invoke once the worker has polled the driver; mostly used for
    /// tasks that yielded.
    pub(crate) defer: Defer,
}

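/// Workers created by `create`, waiting to be spawned via `Launch::launch`.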
pub(crate) struct Launch(Vec<Arc<Worker>>);

type RunResult = Result<Box<Core>, ()>;

type Notified = task::Notified<Arc<Handle>>;

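/// How many times a worker may poll the LIFO slot back-to-back before the slot
/// is temporarily disabled. This keeps two tasks that repeatedly wake each
/// other from starving the rest of the run queue.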
const MAX_LIFO_POLLS_PER_TICK: usize = 3;

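/// Creates the scheduler handle and the set of workers to be launched.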
pub(super) fn create(
    size: usize,
    park: Parker,
    driver_handle: driver::Handle,
    blocking_spawner: blocking::Spawner,
    seed_generator: RngSeedGenerator,
    config: Config,
    timer_flavor: TimerFlavor,
) -> (Arc<Handle>, Launch) {
    let mut cores = Vec::with_capacity(size);
    let mut remotes = Vec::with_capacity(size);
    let mut worker_metrics = Vec::with_capacity(size);

    for _ in 0..size {
        let (steal, run_queue) = queue::local();

        let park = park.clone();
        let unpark = park.unpark();
        let metrics = WorkerMetrics::from_config(&config);
        let stats = Stats::new(&metrics);

        cores.push(Box::new(Core {
            tick: 0,
            lifo_slot: None,
            lifo_enabled: !config.disable_lifo_slot,
            run_queue,
            #[cfg(all(tokio_unstable, feature = "time"))]
            time_context: time_alt::LocalContext::new(),
            is_searching: false,
            is_shutdown: false,
            is_traced: false,
            park: Some(park),
            global_queue_interval: stats.tuned_global_queue_interval(&config),
            stats,
            rand: FastRand::from_seed(config.seed_generator.next_seed()),
        }));

        remotes.push(Remote { steal, unpark });
        worker_metrics.push(metrics);
    }

    let (idle, idle_synced) = Idle::new(size);
    let (inject, inject_synced) = inject::Shared::new();

    let remotes_len = remotes.len();
    let handle = Arc::new(Handle {
        task_hooks: TaskHooks::from_config(&config),
        shared: Shared {
            remotes: remotes.into_boxed_slice(),
            inject,
            idle,
            owned: OwnedTasks::new(size),
            synced: Mutex::new(Synced {
                idle: idle_synced,
                inject: inject_synced,
                #[cfg(all(tokio_unstable, feature = "time"))]
                inject_timers: Vec::new(),
            }),
            shutdown_cores: Mutex::new(vec![]),
            trace_status: TraceStatus::new(remotes_len),
            config,
            scheduler_metrics: SchedulerMetrics::new(),
            worker_metrics: worker_metrics.into_boxed_slice(),
            _counters: Counters,
        },
        driver: driver_handle,
        blocking_spawner,
        seed_generator,
        timer_flavor,
        #[cfg(all(tokio_unstable, feature = "time"))]
        is_shutdown: AtomicBool::new(false),
    });

    let mut launch = Launch(vec![]);

    for (index, core) in cores.drain(..).enumerate() {
        launch.0.push(Arc::new(Worker {
            handle: handle.clone(),
            index,
            core: AtomicCell::new(Some(core)),
        }));
    }

    (handle, launch)
}

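/// Runs the provided blocking function from an asynchronous context.
///
/// When called on a multi-threaded runtime worker, the worker's core (after
/// moving any LIFO-slot task onto the run queue) is handed off to a freshly
/// spawned thread so the remaining tasks keep making progress while `f`
/// blocks.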
#[track_caller]
pub(crate) fn block_in_place<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    struct Reset {
        take_core: bool,
        budget: coop::Budget,
    }

    impl Drop for Reset {
        fn drop(&mut self) {
            with_current(|maybe_cx| {
                if let Some(cx) = maybe_cx {
                    if self.take_core {
                        let core = cx.worker.core.take();

                        if core.is_some() {
                            cx.worker.handle.shared.worker_metrics[cx.worker.index]
                                .set_thread_id(thread::current().id());
                        }

                        let mut cx_core = cx.core.borrow_mut();
                        assert!(cx_core.is_none());
                        *cx_core = core;
                    }

                    coop::set(self.budget);
                }
            });
        }
    }

    let mut had_entered = false;
    let mut take_core = false;

    let setup_result = with_current(|maybe_cx| {
        match (
            crate::runtime::context::current_enter_context(),
            maybe_cx.is_some(),
        ) {
            (context::EnterRuntime::Entered { .. }, true) => {
                had_entered = true;
            }
            (
                context::EnterRuntime::Entered {
                    allow_block_in_place,
                },
                false,
            ) => {
                if allow_block_in_place {
                    had_entered = true;
                    return Ok(());
                } else {
                    return Err(
                        "can call blocking only when running on the multi-threaded runtime",
                    );
                }
            }
            (context::EnterRuntime::NotEntered, true) => {
                return Ok(());
            }
            (context::EnterRuntime::NotEntered, false) => {
                return Ok(());
            }
        }

        let cx = maybe_cx.expect("no .is_some() == false cases above should lead here");

        let mut core = match cx.core.borrow_mut().take() {
            Some(core) => core,
            None => return Ok(()),
        };

        if let Some(task) = core.lifo_slot.take() {
            core.run_queue
                .push_back_or_overflow(task, &*cx.worker.handle, &mut core.stats);
        }

        take_core = true;

        assert!(core.park.is_some());

        cx.worker.core.set(core);

        let worker = cx.worker.clone();
        runtime::spawn_blocking(move || run(worker));
        Ok(())
    });

    if let Err(panic_message) = setup_result {
        panic!("{}", panic_message);
    }

    if had_entered {
        let _reset = Reset {
            take_core,
            budget: coop::stop(),
        };

        crate::runtime::context::exit_runtime(f)
    } else {
        f()
    }
}

impl Launch {
    pub(crate) fn launch(mut self) {
        for worker in self.0.drain(..) {
            runtime::spawn_blocking(move || run(worker));
        }
    }
}

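/// Entry point of a worker thread: takes the core (if it has not already been
/// handed off), enters the runtime context, and drives `Context::run` until
/// shutdown.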
fn run(worker: Arc<Worker>) {
    #[allow(dead_code)]
    struct AbortOnPanic;

    impl Drop for AbortOnPanic {
        fn drop(&mut self) {
            if std::thread::panicking() {
                eprintln!("worker thread panicking; aborting process");
                std::process::abort();
            }
        }
    }

    #[cfg(debug_assertions)]
    let _abort_on_panic = AbortOnPanic;

    let core = match worker.core.take() {
        Some(core) => core,
        None => return,
    };

    worker.handle.shared.worker_metrics[worker.index].set_thread_id(thread::current().id());

    let handle = scheduler::Handle::MultiThread(worker.handle.clone());

    crate::runtime::context::enter_runtime(&handle, true, |_| {
        let cx = scheduler::Context::MultiThread(Context {
            worker,
            core: RefCell::new(None),
            defer: Defer::new(),
        });

        context::set_scheduler(&cx, || {
            let cx = cx.expect_multi_thread();

            assert!(cx.run(core).is_err());

            cx.defer.wake();
        });
    });
}

impl Context {
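    /// The main scheduling loop: tick, run maintenance, poll local and remote
    /// queues, steal work, and park when idle. Returns `Err(())` once the core
    /// has been handed to `shutdown_core`.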
    fn run(&self, mut core: Box<Core>) -> RunResult {
        self.reset_lifo_enabled(&mut core);

        core.stats.start_processing_scheduled_tasks();

        while !core.is_shutdown {
            self.assert_lifo_enabled_is_correct(&core);

            if core.is_traced {
                core = self.worker.handle.trace_core(core);
            }

            core.tick();

            core = self.maintenance(core);

            if let Some(task) = core.next_task(&self.worker) {
                core = self.run_task(task, core)?;
                continue;
            }

            core.stats.end_processing_scheduled_tasks();

            if let Some(task) = core.steal_work(&self.worker) {
                core.stats.start_processing_scheduled_tasks();
                core = self.run_task(task, core)?;
            } else {
                core = if !self.defer.is_empty() {
                    self.park_yield(core)
                } else {
                    self.park(core)
                };
                core.stats.start_processing_scheduled_tasks();
            }
        }

        #[cfg(all(tokio_unstable, feature = "time"))]
        {
            match self.worker.handle.timer_flavor {
                TimerFlavor::Traditional => {}
                TimerFlavor::Alternative => {
                    util::time_alt::shutdown_local_timers(
                        &mut core.time_context.wheel,
                        &mut core.time_context.canc_rx,
                        self.worker.handle.take_remote_timers(),
                        &self.worker.handle.driver,
                    );
                }
            }
        }

        core.pre_shutdown(&self.worker);
        self.worker.handle.shutdown_core(core);
        Err(())
    }

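    /// Polls the given task, then keeps polling whatever lands in the LIFO
    /// slot until the slot is empty, the coop budget is exhausted, or the core
    /// has been taken by `block_in_place`.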
    fn run_task(&self, task: Notified, mut core: Box<Core>) -> RunResult {
        #[cfg(tokio_unstable)]
        let task_meta = task.task_meta();

        let task = self.worker.handle.shared.owned.assert_owner(task);

        core.transition_from_searching(&self.worker);

        self.assert_lifo_enabled_is_correct(&core);

        core.stats.start_poll();

        *self.core.borrow_mut() = Some(core);

        coop::budget(|| {
            #[cfg(tokio_unstable)]
            self.worker
                .handle
                .task_hooks
                .poll_start_callback(&task_meta);

            task.run();

            #[cfg(tokio_unstable)]
            self.worker.handle.task_hooks.poll_stop_callback(&task_meta);

            let mut lifo_polls = 0;

            loop {
                let mut core = match self.core.borrow_mut().take() {
                    Some(core) => core,
                    None => {
                        return Err(());
                    }
                };

                let task = match core.lifo_slot.take() {
                    Some(task) => task,
                    None => {
                        self.reset_lifo_enabled(&mut core);
                        core.stats.end_poll();
                        return Ok(core);
                    }
                };

                if !coop::has_budget_remaining() {
                    core.stats.end_poll();

                    core.run_queue.push_back_or_overflow(
                        task,
                        &*self.worker.handle,
                        &mut core.stats,
                    );
                    debug_assert!(core.lifo_enabled);
                    return Ok(core);
                }

                lifo_polls += 1;
                super::counters::inc_lifo_schedules();

                if lifo_polls >= MAX_LIFO_POLLS_PER_TICK {
                    core.lifo_enabled = false;
                    super::counters::inc_lifo_capped();
                }

                *self.core.borrow_mut() = Some(core);
                let task = self.worker.handle.shared.owned.assert_owner(task);

                #[cfg(tokio_unstable)]
                let task_meta = task.task_meta();

                #[cfg(tokio_unstable)]
                self.worker
                    .handle
                    .task_hooks
                    .poll_start_callback(&task_meta);

                task.run();

                #[cfg(tokio_unstable)]
                self.worker.handle.task_hooks.poll_stop_callback(&task_meta);
            }
        })
    }

    fn reset_lifo_enabled(&self, core: &mut Core) {
        core.lifo_enabled = !self.worker.handle.shared.config.disable_lifo_slot;
    }

    fn assert_lifo_enabled_is_correct(&self, core: &Core) {
        debug_assert_eq!(
            core.lifo_enabled,
            !self.worker.handle.shared.config.disable_lifo_slot
        );
    }

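    /// Every `event_interval` ticks, briefly parks to poll the driver and
    /// refreshes the shutdown and trace state.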
    fn maintenance(&self, mut core: Box<Core>) -> Box<Core> {
        if core.tick % self.worker.handle.shared.config.event_interval == 0 {
            super::counters::inc_num_maintenance();

            core.stats.end_processing_scheduled_tasks();

            core = self.park_yield(core);

            core.maintenance(&self.worker);

            core.stats.start_processing_scheduled_tasks();
        }

        core
    }

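    /// Parks the worker until new work arrives, shutdown is observed, or a
    /// trace is requested, invoking the `before_park` and `after_unpark`
    /// callbacks around the wait.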
    fn park(&self, mut core: Box<Core>) -> Box<Core> {
        if let Some(f) = &self.worker.handle.shared.config.before_park {
            f();
        }

        if core.transition_to_parked(&self.worker) {
            while !core.is_shutdown && !core.is_traced {
                core.stats.about_to_park();
                core.stats
                    .submit(&self.worker.handle.shared.worker_metrics[self.worker.index]);

                core = self.park_internal(core, None);

                core.stats.unparked();

                core.maintenance(&self.worker);

                if core.transition_from_parked(&self.worker) {
                    break;
                }
            }
        }

        if let Some(f) = &self.worker.handle.shared.config.after_unpark {
            f();
        }
        core
    }

    fn park_yield(&self, core: Box<Core>) -> Box<Core> {
        self.park_internal(core, Some(Duration::from_millis(0)))
    }

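    /// Releases the core, parks on the driver (optionally with a timeout),
    /// then reclaims the core and wakes another worker if the local queue
    /// warrants it.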
    fn park_internal(&self, mut core: Box<Core>, duration: Option<Duration>) -> Box<Core> {
        self.assert_lifo_enabled_is_correct(&core);

        let mut park = core.park.take().expect("park missing");
        *self.core.borrow_mut() = Some(core);

        #[cfg(feature = "time")]
        let (duration, auto_advance_duration) = match self.worker.handle.timer_flavor {
            TimerFlavor::Traditional => (duration, None::<Duration>),
            #[cfg(tokio_unstable)]
            TimerFlavor::Alternative => {
                let MaintainLocalTimer {
                    park_duration: duration,
                    auto_advance_duration,
                } = self.maintain_local_timers_before_parking(duration);
                (duration, auto_advance_duration)
            }
        };

        if let Some(timeout) = duration {
            park.park_timeout(&self.worker.handle.driver, timeout);
        } else {
            park.park(&self.worker.handle.driver);
        }

        self.defer.wake();

        #[cfg(feature = "time")]
        match self.worker.handle.timer_flavor {
            TimerFlavor::Traditional => {
                let _ = auto_advance_duration;
            }
            #[cfg(tokio_unstable)]
            TimerFlavor::Alternative => {
                self.maintain_local_timers_after_parking(auto_advance_duration);
            }
        }

        core = self.core.borrow_mut().take().expect("core missing");

        core.park = Some(park);
        if core.should_notify_others() {
            self.worker.handle.notify_parked_local();
        }
        core
    }

    pub(crate) fn defer(&self, waker: &Waker) {
        if self.core.borrow().is_none() {
            waker.wake_by_ref();
        } else {
            self.defer.defer(waker);
        }
    }

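    /// Alternative-timer maintenance run before parking: moves locally
    /// registered, remotely injected, and cancelled timers into or out of the
    /// local wheel, wakes anything that completed during processing, and
    /// computes how long to park plus an optional auto-advance duration.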
    #[cfg(all(tokio_unstable, feature = "time"))]
    fn maintain_local_timers_before_parking(
        &self,
        park_duration: Option<Duration>,
    ) -> MaintainLocalTimer {
        let handle = &self.worker.handle;
        let mut wake_queue = time_alt::WakeQueue::new();

        let (should_yield, next_timer) = with_current(|maybe_cx| {
            let cx = maybe_cx.expect("function should be called when core is present");
            assert_eq!(
                Arc::as_ptr(&cx.worker.handle),
                Arc::as_ptr(&self.worker.handle),
                "function should be called on the exact same worker"
            );

            let mut maybe_core = cx.core.borrow_mut();
            let core = maybe_core.as_mut().expect("core missing");
            let time_cx = &mut core.time_context;

            util::time_alt::process_registration_queue(
                &mut time_cx.registration_queue,
                &mut time_cx.wheel,
                &time_cx.canc_tx,
                &mut wake_queue,
            );
            util::time_alt::insert_inject_timers(
                &mut time_cx.wheel,
                &time_cx.canc_tx,
                handle.take_remote_timers(),
                &mut wake_queue,
            );
            util::time_alt::remove_cancelled_timers(&mut time_cx.wheel, &mut time_cx.canc_rx);
            let should_yield = !wake_queue.is_empty();

            let next_timer = util::time_alt::next_expiration_time(&time_cx.wheel, &handle.driver);

            (should_yield, next_timer)
        });

        wake_queue.wake_all();

        if should_yield {
            MaintainLocalTimer {
                park_duration: Some(Duration::from_millis(0)),
                auto_advance_duration: None,
            }
        } else {
            let dur = util::time_alt::min_duration(park_duration, next_timer);
            if util::time_alt::pre_auto_advance(&handle.driver, dur) {
                MaintainLocalTimer {
                    park_duration: Some(Duration::ZERO),
                    auto_advance_duration: dur,
                }
            } else {
                MaintainLocalTimer {
                    park_duration: dur,
                    auto_advance_duration: None,
                }
            }
        }
    }

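    /// Alternative-timer maintenance run after parking: completes any
    /// auto-advance started before the park and wakes timers that have
    /// expired on the local wheel.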
949 #[cfg(all(tokio_unstable, feature = "time"))]
950 fn maintain_local_timers_after_parking(&self, auto_advance_duration: Option<Duration>) {
955 let handle = &self.worker.handle;
956 let mut wake_queue = time_alt::WakeQueue::new();
957
958 with_current(|maybe_cx| {
959 let cx = maybe_cx.expect("function should be called when core is present");
960 assert_eq!(
961 Arc::as_ptr(&cx.worker.handle),
962 Arc::as_ptr(&self.worker.handle),
963 "function should be called on the exact same worker"
964 );
965
966 let mut maybe_core = cx.core.borrow_mut();
967 let core = maybe_core.as_mut().expect("core missing");
968 let time_cx = &mut core.time_context;
969
970 util::time_alt::post_auto_advance(&handle.driver, auto_advance_duration);
971 util::time_alt::process_expired_timers(
972 &mut time_cx.wheel,
973 &handle.driver,
974 &mut wake_queue,
975 );
976 });
977
978 wake_queue.wake_all();
979 }
980
981 #[cfg(all(tokio_unstable, feature = "time"))]
982 fn with_core<F, R>(&self, f: F) -> R
983 where
984 F: FnOnce(Option<&mut Core>) -> R,
985 {
986 match self.core.borrow_mut().as_mut() {
987 Some(core) => f(Some(core)),
988 None => f(None),
989 }
990 }
991
992 #[cfg(all(tokio_unstable, feature = "time"))]
993 pub(crate) fn with_time_temp_local_context<F, R>(&self, f: F) -> R
994 where
995 F: FnOnce(Option<time_alt::TempLocalContext<'_>>) -> R,
996 {
997 self.with_core(|maybe_core| match maybe_core {
998 Some(core) if core.is_shutdown => f(Some(time_alt::TempLocalContext::new_shutdown())),
999 Some(core) => f(Some(time_alt::TempLocalContext::new_running(
1000 &mut core.time_context,
1001 ))),
1002 None => f(None),
1003 })
1004 }
1005}
1006
impl Core {
    fn tick(&mut self) {
        self.tick = self.tick.wrapping_add(1);
    }

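    /// Returns the next task to run: every `global_queue_interval` ticks the
    /// injection queue is checked first, otherwise the LIFO slot and local run
    /// queue are preferred, refilling the local queue from the injection queue
    /// when it runs dry.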
    fn next_task(&mut self, worker: &Worker) -> Option<Notified> {
        if self.tick % self.global_queue_interval == 0 {
            self.tune_global_queue_interval(worker);

            worker
                .handle
                .next_remote_task()
                .or_else(|| self.next_local_task())
        } else {
            let maybe_task = self.next_local_task();

            if maybe_task.is_some() {
                return maybe_task;
            }

            if worker.inject().is_empty() {
                return None;
            }

            let cap = usize::min(
                self.run_queue.remaining_slots(),
                self.run_queue.max_capacity() / 2,
            );

            let n = usize::min(
                worker.inject().len() / worker.handle.shared.remotes.len() + 1,
                cap,
            );

            let n = usize::max(1, n);

            let mut synced = worker.handle.shared.synced.lock();
            let mut tasks = unsafe { worker.inject().pop_n(&mut synced.inject, n) };

            let ret = tasks.next();

            self.run_queue.push_back(tasks);

            ret
        }
    }

    fn next_local_task(&mut self) -> Option<Notified> {
        self.lifo_slot.take().or_else(|| self.run_queue.pop())
    }

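    /// Attempts to steal work from sibling workers (starting at a random
    /// index), falling back to the injection queue. Only proceeds if this
    /// worker is allowed to enter the searching state.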
    fn steal_work(&mut self, worker: &Worker) -> Option<Notified> {
        if !self.transition_to_searching(worker) {
            return None;
        }

        let num = worker.handle.shared.remotes.len();
        let start = self.rand.fastrand_n(num as u32) as usize;

        for i in 0..num {
            let i = (start + i) % num;

            if i == worker.index {
                continue;
            }

            let target = &worker.handle.shared.remotes[i];
            if let Some(task) = target
                .steal
                .steal_into(&mut self.run_queue, &mut self.stats)
            {
                return Some(task);
            }
        }

        worker.handle.next_remote_task()
    }

    fn transition_to_searching(&mut self, worker: &Worker) -> bool {
        if !self.is_searching {
            self.is_searching = worker.handle.shared.idle.transition_worker_to_searching();
        }

        self.is_searching
    }

    fn transition_from_searching(&mut self, worker: &Worker) {
        if !self.is_searching {
            return;
        }

        self.is_searching = false;
        worker.handle.transition_worker_from_searching();
    }

    fn has_tasks(&self) -> bool {
        self.lifo_slot.is_some() || self.run_queue.has_tasks()
    }

    fn should_notify_others(&self) -> bool {
        if self.is_searching {
            return false;
        }
        self.lifo_slot.is_some() as usize + self.run_queue.len() > 1
    }

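    /// Prepares to park the worker. Returns `false` if parking is not possible
    /// because local work or a trace request is pending; if this was the last
    /// searching worker, another worker is notified so pending work is not
    /// missed.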
    fn transition_to_parked(&mut self, worker: &Worker) -> bool {
        if self.has_tasks() || self.is_traced {
            return false;
        }

        let is_last_searcher = worker.handle.shared.idle.transition_worker_to_parked(
            &worker.handle.shared,
            worker.index,
            self.is_searching,
        );

        self.is_searching = false;

        if is_last_searcher {
            worker.handle.notify_if_work_pending();
        }

        true
    }

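    /// Returns `true` if the worker should resume processing after being
    /// unparked, either because local work appeared or because it was
    /// explicitly removed from the idle set.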
    fn transition_from_parked(&mut self, worker: &Worker) -> bool {
        if self.has_tasks() {
            self.is_searching = !worker
                .handle
                .shared
                .idle
                .unpark_worker_by_id(&worker.handle.shared, worker.index);
            return true;
        }

        if worker
            .handle
            .shared
            .idle
            .is_parked(&worker.handle.shared, worker.index)
        {
            return false;
        }

        self.is_searching = true;
        true
    }

    fn maintenance(&mut self, worker: &Worker) {
        self.stats
            .submit(&worker.handle.shared.worker_metrics[worker.index]);

        if !self.is_shutdown {
            let synced = worker.handle.shared.synced.lock();
            self.is_shutdown = worker.inject().is_closed(&synced.inject);
        }

        if !self.is_traced {
            self.is_traced = worker.handle.shared.trace_status.trace_requested();
        }
    }

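    /// Signals all owned tasks that the scheduler is shutting down and closes
    /// the owned-task collection, starting from a randomly chosen shard.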
    fn pre_shutdown(&mut self, worker: &Worker) {
        let start = self
            .rand
            .fastrand_n(worker.handle.shared.owned.get_shard_size() as u32);
        worker
            .handle
            .shared
            .owned
            .close_and_shutdown_all(start as usize);

        self.stats
            .submit(&worker.handle.shared.worker_metrics[worker.index]);
    }

    fn shutdown(&mut self, handle: &Handle) {
        let mut park = self.park.take().expect("park missing");

        while self.next_local_task().is_some() {}

        park.shutdown(&handle.driver);
    }

    fn tune_global_queue_interval(&mut self, worker: &Worker) {
        let next = self
            .stats
            .tuned_global_queue_interval(&worker.handle.shared.config);

        if u32::abs_diff(self.global_queue_interval, next) > 2 {
            self.global_queue_interval = next;
        }
    }
}

impl Worker {
    fn inject(&self) -> &inject::Shared<Arc<Handle>> {
        &self.handle.shared.inject
    }
}

impl Handle {
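    /// Schedules a task for execution: onto the current worker's local queue
    /// when called from a worker of this scheduler, otherwise onto the
    /// injection queue, waking a parked worker if necessary.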
    pub(super) fn schedule_task(&self, task: Notified, is_yield: bool) {
        with_current(|maybe_cx| {
            if let Some(cx) = maybe_cx {
                if self.ptr_eq(&cx.worker.handle) {
                    if let Some(core) = cx.core.borrow_mut().as_mut() {
                        self.schedule_local(core, task, is_yield);
                        return;
                    }
                }
            }

            self.push_remote_task(task);
            self.notify_parked_remote();
        });
    }

    pub(super) fn schedule_option_task_without_yield(&self, task: Option<Notified>) {
        if let Some(task) = task {
            self.schedule_task(task, false);
        }
    }

    fn schedule_local(&self, core: &mut Core, task: Notified, is_yield: bool) {
        core.stats.inc_local_schedule_count();

        let should_notify = if is_yield || !core.lifo_enabled {
            core.run_queue
                .push_back_or_overflow(task, self, &mut core.stats);
            true
        } else {
            let prev = core.lifo_slot.take();
            let ret = prev.is_some();

            if let Some(prev) = prev {
                core.run_queue
                    .push_back_or_overflow(prev, self, &mut core.stats);
            }

            core.lifo_slot = Some(task);

            ret
        };

        if should_notify && core.park.is_some() {
            self.notify_parked_local();
        }
    }

    fn next_remote_task(&self) -> Option<Notified> {
        if self.shared.inject.is_empty() {
            return None;
        }

        let mut synced = self.shared.synced.lock();
        unsafe { self.shared.inject.pop(&mut synced.inject) }
    }

    fn push_remote_task(&self, task: Notified) {
        self.shared.scheduler_metrics.inc_remote_schedule_count();

        let mut synced = self.shared.synced.lock();
        unsafe {
            self.shared.inject.push(&mut synced.inject, task);
        }
    }

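    /// Queues a timer registered off its owning worker (alternative timer
    /// flavor only) and notifies a parked worker so it gets picked up.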
    #[cfg(all(tokio_unstable, feature = "time"))]
    pub(crate) fn push_remote_timer(&self, hdl: time_alt::EntryHandle) {
        assert_eq!(self.timer_flavor, TimerFlavor::Alternative);
        {
            let mut synced = self.shared.synced.lock();
            synced.inject_timers.push(hdl);
        }
        self.notify_parked_remote();
    }

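    /// Takes all remotely injected timers. A `try_lock` is used so timer
    /// maintenance never blocks on the scheduler mutex; under contention an
    /// empty list is returned and the timers remain queued for a later pass.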
    #[cfg(all(tokio_unstable, feature = "time"))]
    pub(crate) fn take_remote_timers(&self) -> Vec<time_alt::EntryHandle> {
        assert_eq!(self.timer_flavor, TimerFlavor::Alternative);
        match self.shared.synced.try_lock() {
            Some(mut synced) => std::mem::take(&mut synced.inject_timers),
            None => Vec::new(),
        }
    }

    pub(super) fn close(&self) {
        if self
            .shared
            .inject
            .close(&mut self.shared.synced.lock().inject)
        {
            self.notify_all();
        }
    }

    fn notify_parked_local(&self) {
        super::counters::inc_num_inc_notify_local();

        if let Some(index) = self.shared.idle.worker_to_notify(&self.shared) {
            super::counters::inc_num_unparks_local();
            self.shared.remotes[index].unpark.unpark(&self.driver);
        }
    }

    fn notify_parked_remote(&self) {
        if let Some(index) = self.shared.idle.worker_to_notify(&self.shared) {
            self.shared.remotes[index].unpark.unpark(&self.driver);
        }
    }

    pub(super) fn notify_all(&self) {
        for remote in &self.shared.remotes[..] {
            remote.unpark.unpark(&self.driver);
        }
    }

    fn notify_if_work_pending(&self) {
        for remote in &self.shared.remotes[..] {
            if !remote.steal.is_empty() {
                self.notify_parked_local();
                return;
            }
        }

        if !self.shared.inject.is_empty() {
            self.notify_parked_local();
        }
    }

    fn transition_worker_from_searching(&self) {
        if self.shared.idle.transition_worker_from_searching() {
            self.notify_parked_local();
        }
    }

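    /// Signals that a worker has observed the shutdown signal and returned its
    /// core. Once all cores have been collected, they are shut down and any
    /// remaining injected tasks are dropped.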
    fn shutdown_core(&self, core: Box<Core>) {
        let mut cores = self.shared.shutdown_cores.lock();
        cores.push(core);

        if cores.len() != self.shared.remotes.len() {
            return;
        }

        debug_assert!(self.shared.owned.is_empty());

        for mut core in cores.drain(..) {
            core.shutdown(self);
        }

        while let Some(task) = self.next_remote_task() {
            drop(task);
        }
    }

    fn ptr_eq(&self, other: &Handle) -> bool {
        std::ptr::eq(self, other)
    }
}

impl Overflow<Arc<Handle>> for Handle {
    fn push(&self, task: task::Notified<Arc<Handle>>) {
        self.push_remote_task(task);
    }

    fn push_batch<I>(&self, iter: I)
    where
        I: Iterator<Item = task::Notified<Arc<Handle>>>,
    {
        unsafe {
            self.shared.inject.push_batch(self, iter);
        }
    }
}

pub(crate) struct InjectGuard<'a> {
    lock: crate::loom::sync::MutexGuard<'a, Synced>,
}

impl<'a> AsMut<inject::Synced> for InjectGuard<'a> {
    fn as_mut(&mut self) -> &mut inject::Synced {
        &mut self.lock.inject
    }
}

impl<'a> Lock<inject::Synced> for &'a Handle {
    type Handle = InjectGuard<'a>;

    fn lock(self) -> Self::Handle {
        InjectGuard {
            lock: self.shared.synced.lock(),
        }
    }
}

#[cfg(all(tokio_unstable, feature = "time"))]
struct MaintainLocalTimer {
    park_duration: Option<Duration>,
    auto_advance_duration: Option<Duration>,
}

#[track_caller]
fn with_current<R>(f: impl FnOnce(Option<&Context>) -> R) -> R {
    use scheduler::Context::MultiThread;

    context::with_scheduler(|ctx| match ctx {
        Some(MultiThread(ctx)) => f(Some(ctx)),
        _ => f(None),
    })
}