Merge branch 'scheduler_relaxed_atomics_under_lock' into integration

This commit is contained in:
J. Nick Koston
2026-02-19 23:44:05 -06:00
2 changed files with 24 additions and 8 deletions

View File

@@ -406,7 +406,7 @@ void Scheduler::full_cleanup_removed_items_() {
// Compact in-place: move valid items forward, recycle removed ones
size_t write = 0;
for (size_t read = 0; read < this->items_.size(); ++read) {
if (!is_item_removed_(this->items_[read].get())) {
if (!is_item_removed_locked_(this->items_[read].get())) {
if (write != read) {
this->items_[write] = std::move(this->items_[read]);
}
@@ -531,7 +531,7 @@ void HOT Scheduler::call(uint32_t now) {
// Multi-threaded platforms without atomics: must take lock to safely read remove flag
{
LockGuard guard{this->lock_};
if (is_item_removed_(item.get())) {
if (is_item_removed_locked_(item.get())) {
this->recycle_item_main_loop_(this->pop_raw_locked_());
this->to_remove_--;
continue;
@@ -568,7 +568,7 @@ void HOT Scheduler::call(uint32_t now) {
// during the function call and know if we were cancelled.
auto executed_item = this->pop_raw_locked_();
if (executed_item->remove) {
if (this->is_item_removed_locked_(executed_item.get())) {
// We were removed/cancelled in the function call, recycle and continue
this->to_remove_--;
this->recycle_item_main_loop_(std::move(executed_item));
@@ -595,7 +595,7 @@ void HOT Scheduler::call(uint32_t now) {
void HOT Scheduler::process_to_add() {
LockGuard guard{this->lock_};
for (auto &it : this->to_add_) {
if (is_item_removed_(it.get())) {
if (is_item_removed_locked_(it.get())) {
// Recycle cancelled items
this->recycle_item_main_loop_(std::move(it));
continue;
@@ -628,7 +628,7 @@ size_t HOT Scheduler::cleanup_() {
LockGuard guard{this->lock_};
while (!this->items_.empty()) {
auto &item = this->items_[0];
if (!item->remove)
if (!this->is_item_removed_locked_(item.get()))
break;
this->to_remove_--;
this->recycle_item_main_loop_(this->pop_raw_locked_());

View File

@@ -310,8 +310,8 @@ class Scheduler {
// Fixes: https://github.com/esphome/esphome/issues/11940
if (!item)
return false;
if (item->component != component || item->type != type || (skip_removed && item->remove) ||
(match_retry && !item->is_retry)) {
if (item->component != component || item->type != type ||
(skip_removed && this->is_item_removed_locked_(item.get())) || (match_retry && !item->is_retry)) {
return false;
}
// Name type must match
@@ -459,6 +459,22 @@ class Scheduler {
#endif
}
// Helper to check if item is marked for removal when lock is already held.
// Uses relaxed ordering since the mutex provides all necessary synchronization.
// IMPORTANT: Caller must hold the scheduler lock before calling this function.
bool is_item_removed_locked_(SchedulerItem *item) const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
// Lock already held - relaxed is sufficient, mutex provides ordering.
// Use GCC __atomic_load_n builtin instead of std::atomic::load() because
// GCC for Xtensa emits std::atomic<bool>::load() as an out-of-line
// libstdc++ call, adding function call overhead that exceeds the memw
// barrier savings this optimization aims to eliminate.
// NOTE(review): the reinterpret_cast assumes std::atomic<bool> is
// layout-compatible with a plain bool (holds for lock-free atomics on the
// supported targets) — confirm is_always_lock_free before porting to a new
// platform, otherwise this read would bypass the atomic's internal lock.
return __atomic_load_n(reinterpret_cast<const bool *>(&item->remove), __ATOMIC_RELAXED);
#else
// Non-atomic builds: `remove` is a plain bool; the scheduler lock the
// caller holds (see contract above) makes this plain read race-free.
return item->remove;
#endif
}
// Helper to set item removal flag (platform-specific)
// For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
// function. Uses memory_order_release when setting to true (for cancellation synchronization),
@@ -515,7 +531,7 @@ class Scheduler {
// it will iterate over these nullptr items. This check prevents crashes.
if (!item)
continue;
if (is_item_removed_(item.get()) &&
if (this->is_item_removed_locked_(item.get()) &&
this->matches_item_locked_(item, component, name_type, static_name, hash_or_id, SchedulerItem::TIMEOUT,
match_retry, /* skip_removed= */ false)) {
return true;