/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

enum cpu_idle_type {
	__CPU_NOT_IDLE = 0,
	CPU_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */
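For context, the pattern these helpers support looks roughly like the sketch below. This is a hedged illustration only, not the kernel's actual idle loop (that lives in kernel/sched/idle.c): the function name poll_idle_sketch() is hypothetical, the sketch assumes a build where TIF_POLLING_NRFLAG is defined, and it omits the real loop's interrupt, tracing, and cpuidle handling.

/* Hypothetical sketch of a polling idle loop using the helpers above. */
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/thread_info.h>	/* tif_need_resched() */

static void poll_idle_sketch(void)
{
	/*
	 * Advertise TIF_POLLING_NRFLAG, then re-check NEED_RESCHED. While
	 * the polling bit is set, a remote resched_curr() can skip the
	 * reschedule IPI and just set TIF_NEED_RESCHED, which the busy-wait
	 * below observes.
	 */
	if (!current_set_polling_and_test()) {
		while (!tif_need_resched())
			cpu_relax();	/* spin until a remote CPU sets NEED_RESCHED */
	}

	/*
	 * Stop polling: clear the bit with the required barrier and fold any
	 * pending TIF_NEED_RESCHED via preempt_fold_need_resched(), so the
	 * caller's next preemption point (or call into the scheduler) sees it.
	 * After this point, waking this CPU again requires an IPI.
	 */
	current_clr_polling();
}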