diff --git a/esphome/core/scheduler.cpp b/esphome/core/scheduler.cpp index 3ff8fa4475..c783a788cc 100644 --- a/esphome/core/scheduler.cpp +++ b/esphome/core/scheduler.cpp @@ -545,7 +545,7 @@ void HOT Scheduler::call(uint32_t now) { // during the function call and know if we were cancelled. auto executed_item = this->pop_raw_locked_(); - if (executed_item->remove) { + if (this->is_item_removed_locked_(executed_item.get())) { // We were removed/cancelled in the function call, recycle and continue this->to_remove_--; this->recycle_item_main_loop_(std::move(executed_item)); @@ -605,7 +605,7 @@ size_t HOT Scheduler::cleanup_() { LockGuard guard{this->lock_}; while (!this->items_.empty()) { auto &item = this->items_[0]; - if (!item->remove) + if (!this->is_item_removed_locked_(item.get())) break; this->to_remove_--; this->recycle_item_main_loop_(this->pop_raw_locked_()); diff --git a/esphome/core/scheduler.h b/esphome/core/scheduler.h index cf9a2a3660..020d822a0a 100644 --- a/esphome/core/scheduler.h +++ b/esphome/core/scheduler.h @@ -470,8 +470,12 @@ class Scheduler { // IMPORTANT: Caller must hold the scheduler lock before calling this function. bool is_item_removed_locked_(SchedulerItem *item) const { #ifdef ESPHOME_THREAD_MULTI_ATOMICS - // Lock already held - relaxed is sufficient, mutex provides ordering - return item->remove.load(std::memory_order_relaxed); + // Lock already held - relaxed is sufficient, mutex provides ordering. + // Use GCC __atomic_load_n builtin instead of std::atomic::load() because + // GCC for Xtensa emits std::atomic::load() as an out-of-line + // libstdc++ call, adding function call overhead that exceeds the memw + // barrier savings this optimization aims to eliminate. + return __atomic_load_n(reinterpret_cast<const bool *>(&item->remove), __ATOMIC_RELAXED); #else return item->remove; #endif