diff --git a/include/iocore/cache/CacheDefs.h b/include/iocore/cache/CacheDefs.h
index c5b3e304bf4..17bf581b53d 100644
--- a/include/iocore/cache/CacheDefs.h
+++ b/include/iocore/cache/CacheDefs.h
@@ -95,6 +95,7 @@ enum CacheEventType {
   CACHE_EVENT_SCAN_OPERATION_BLOCKED = CACHE_EVENT_EVENTS_START + 23,
   CACHE_EVENT_SCAN_OPERATION_FAILED  = CACHE_EVENT_EVENTS_START + 24,
   CACHE_EVENT_SCAN_DONE              = CACHE_EVENT_EVENTS_START + 25,
+  CACHE_EVENT_OPEN_DIR_RETRY         = CACHE_EVENT_EVENTS_START + 26,
   //////////////////////////
   // Internal error codes //
   //////////////////////////
diff --git a/include/tsutil/Bravo.h b/include/tsutil/Bravo.h
index 4660f69aca6..da26b828aba 100644
--- a/include/tsutil/Bravo.h
+++ b/include/tsutil/Bravo.h
@@ -372,4 +372,195 @@ template ;
 
+/**
+  ts::bravo::recursive_shared_mutex_impl
+
+  A recursive version of shared_mutex_impl that allows the same thread
+  to acquire exclusive and shared locks multiple times.
+
+  Uses DenseThreadId for per-thread state tracking, which avoids both map overhead
+  and repeated, expensive std::this_thread::get_id() calls for ownership tracking.
+
+  Mixed lock semantics:
+  - Upgrade prevention: A thread holding a shared lock cannot acquire an exclusive lock
+    (it would cause deadlock). try_lock() returns false, lock() asserts in debug builds.
+  - Downgrade allowed: A thread holding an exclusive lock can acquire a shared lock.
+ */
+template <typename T = shared_mutex_impl<>, size_t SLOT_SIZE = 256> class recursive_shared_mutex_impl
+{
+  // Use a sentinel value for "no owner" - DenseThreadId values are 0 to SLOT_SIZE-1
+  static constexpr size_t NO_OWNER = SLOT_SIZE;
+
+public:
+  recursive_shared_mutex_impl()  = default;
+  ~recursive_shared_mutex_impl() = default;
+
+  // No copying or moving
+  recursive_shared_mutex_impl(recursive_shared_mutex_impl const &)            = delete;
+  recursive_shared_mutex_impl &operator=(recursive_shared_mutex_impl const &) = delete;
+  recursive_shared_mutex_impl(recursive_shared_mutex_impl &&)                 = delete;
+  recursive_shared_mutex_impl &operator=(recursive_shared_mutex_impl &&)      = delete;
+
+  ////
+  // Exclusive locking (recursive)
+  //
+  void
+  lock()
+  {
+    size_t tid = DenseThreadId::self();
+    // Fast path: check if we already own the lock
+    if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+      ++_exclusive_count;
+      return;
+    }
+    // Upgrade prevention: cannot acquire exclusive lock while holding shared lock
+    ThreadState &state = _thread_states[tid];
+    debug_assert(state.shared_count == 0);
+    _mutex.lock();
+    _exclusive_owner.store(tid, std::memory_order_relaxed);
+    _exclusive_count = 1;
+  }
+
+  bool
+  try_lock()
+  {
+    size_t tid = DenseThreadId::self();
+    // Fast path: check if we already own the lock
+    if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+      ++_exclusive_count;
+      return true;
+    }
+    // Upgrade prevention: cannot acquire exclusive lock while holding shared lock
+    ThreadState &state = _thread_states[tid];
+    if (state.shared_count > 0) {
+      return false;
+    }
+    if (_mutex.try_lock()) {
+      _exclusive_owner.store(tid, std::memory_order_relaxed);
+      _exclusive_count = 1;
+      return true;
+    }
+    return false;
+  }
+
+  void
+  unlock()
+  {
+    if (--_exclusive_count == 0) {
+      _exclusive_owner.store(NO_OWNER, std::memory_order_relaxed);
+      _mutex.unlock();
+    }
+  }
+
+  ////
+  // Shared locking (recursive)
+  //
+  void
+  lock_shared(Token &token)
+  {
+    size_t tid = DenseThreadId::self();
+    ThreadState &state = _thread_states[tid];
+
+    // Fast path: already holding shared lock - just increment count (most common case)
+    size_t count = state.shared_count;
+    if (count > 0) {
+      state.shared_count = count + 1;
+      token = state.cached_token;
+      return;
+    }
+
+    // Check for downgrade: if we hold the exclusive lock, allow a shared lock without acquiring the underlying mutex
+    if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+      state.shared_count = 1;
+      token = 0; // Special token indicating we're under exclusive lock
+      return;
+    }
+
+    // Slow path: acquire underlying lock
+    _mutex.lock_shared(state.cached_token);
+    state.shared_count = 1;
+    token = state.cached_token;
+  }
+
+  bool
+  try_lock_shared(Token &token)
+  {
+    size_t tid = DenseThreadId::self();
+    ThreadState &state = _thread_states[tid];
+
+    // Fast path: already holding shared lock - just increment count (most common case)
+    size_t count = state.shared_count;
+    if (count > 0) {
+      state.shared_count = count + 1;
+      token = state.cached_token;
+      return true;
+    }
+
+    // Check for downgrade: if we hold the exclusive lock, allow a shared lock without acquiring the underlying mutex
+    if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+      state.shared_count = 1;
+      token = 0; // Special token indicating we're under exclusive lock
+      return true;
+    }
+
+    // Slow path: try to acquire underlying lock
+    if (_mutex.try_lock_shared(state.cached_token)) {
+      state.shared_count = 1;
+      token = state.cached_token;
+      return true;
+    }
+    return false;
+  }
+
+  void
+  unlock_shared(const Token /* token */)
+  {
+    size_t tid = DenseThreadId::self();
+    ThreadState &state = _thread_states[tid];
+    if (--state.shared_count == 0) {
+      // Only unlock underlying mutex if we're not holding exclusive lock
+      if (_exclusive_owner.load(std::memory_order_relaxed) != tid) {
+        _mutex.unlock_shared(state.cached_token);
+      }
+      state.cached_token = 0;
+    }
+  }
+
+  // Extensions to check lock ownership
+  bool
+  has_unique_lock()
+  {
+    return _exclusive_owner.load(std::memory_order_relaxed) == DenseThreadId::self();
+  }
+
+  bool
+  has_shared_lock()
+  {
+    size_t tid = DenseThreadId::self();
+    ThreadState &state = _thread_states[tid];
+
+    if (state.shared_count > 0) {
+      return true;
+    } else if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+private:
+  struct ThreadState {
+    size_t shared_count{0};
+    Token cached_token{0};
+  };
+
+  T _mutex;
+  std::atomic<size_t> _exclusive_owner{NO_OWNER};
+  size_t _exclusive_count{0};
+  std::array<ThreadState, SLOT_SIZE> _thread_states{};
+};
+
+using recursive_shared_mutex = recursive_shared_mutex_impl<>;
+
 } // namespace ts::bravo
diff --git a/src/iocore/cache/Cache.cc b/src/iocore/cache/Cache.cc
index 512a5e7bf76..67293c2ae53 100644
--- a/src/iocore/cache/Cache.cc
+++ b/src/iocore/cache/Cache.cc
@@ -109,6 +109,28 @@ DbgCtl dbg_ctl_cache_init{"cache_init"};
 DbgCtl dbg_ctl_cache_hosting{"cache_hosting"};
 DbgCtl dbg_ctl_cache_update{"cache_update"};
 
+CacheVC *
+new_CacheVC_for_read(Continuation *cont, const CacheKey *key, CacheHTTPHdr *request, const HttpConfigAccessor *params,
+                     StripeSM *stripe)
+{
+  CacheVC *cache_vc = new_CacheVC(cont);
+
+  cache_vc->first_key = *key;
+  cache_vc->key = *key;
+  cache_vc->earliest_key = *key;
+  cache_vc->stripe = stripe;
+  cache_vc->vio.op = VIO::READ;
+  cache_vc->op_type = static_cast<int>(CacheOpType::Read);
+  cache_vc->frag_type = CACHE_FRAG_TYPE_HTTP;
+  cache_vc->params = params;
+  cache_vc->request.copy_shallow(request);
+
+  ts::Metrics::Gauge::increment(cache_rsb.status[cache_vc->op_type].active);
+  ts::Metrics::Gauge::increment(stripe->cache_vol->vol_rsb.status[cache_vc->op_type].active);
+
+  return cache_vc;
+}
+
 } // end anonymous namespace
 
 // Global list of the volumes created
@@ -543,20 +565,24 @@ Cache::open_read(Continuation *cont, const CacheKey *key, CacheHTTPHdr *request,
   OpenDirEntry *od = nullptr;
   CacheVC *c = nullptr;
 
+  // Read-While-Writer
+  // This OpenDirEntry lookup doesn't need the stripe mutex lock because OpenDir has its own reader-writer lock
+  od = stripe->open_read(key);
+  if (od != nullptr) {
+    c = new_CacheVC_for_read(cont, key, request, params, stripe);
+    c->od = od;
+    cont->handleEvent(CACHE_EVENT_OPEN_READ_RWW, nullptr);
+    SET_CONTINUATION_HANDLER(c, &CacheVC::openReadFromWriter);
+    if (c->handleEvent(EVENT_IMMEDIATE, nullptr) == EVENT_DONE) {
+      return ACTION_RESULT_DONE;
+    }
+    return &c->_action;
+  }
+
   {
     CACHE_TRY_LOCK(lock, stripe->mutex, mutex->thread_holding);
-    if (!lock.is_locked() || (od = stripe->open_read(key)) || stripe->directory.probe(key, stripe, &result, &last_collision)) {
-      c = new_CacheVC(cont);
-      c->first_key = c->key = c->earliest_key = *key;
-      c->stripe = stripe;
-      c->vio.op = VIO::READ;
-      c->op_type = static_cast<int>(CacheOpType::Read);
-      ts::Metrics::Gauge::increment(cache_rsb.status[c->op_type].active);
-      ts::Metrics::Gauge::increment(stripe->cache_vol->vol_rsb.status[c->op_type].active);
-      c->request.copy_shallow(request);
-      c->frag_type = CACHE_FRAG_TYPE_HTTP;
-      c->params = params;
-      c->od = od;
+    if (!lock.is_locked() || stripe->directory.probe(key, stripe, &result, &last_collision)) {
+      c = new_CacheVC_for_read(cont, key, request, params, stripe);
     }
     if (!lock.is_locked()) {
       SET_CONTINUATION_HANDLER(c, &CacheVC::openReadStartHead);
@@ -566,9 +592,7 @@ Cache::open_read(Continuation *cont, const CacheKey *key, CacheHTTPHdr *request,
   if (!c) {
     goto Lmiss;
   }
-  if (c->od) {
-    goto Lwriter;
-  }
+
   // hit
   c->dir = c->first_dir = result;
   c->last_collision = last_collision;
@@ -587,13 +611,6 @@ Cache::open_read(Continuation *cont, const CacheKey *key, CacheHTTPHdr *request,
   ts::Metrics::Counter::increment(stripe->cache_vol->vol_rsb.status[static_cast<int>(CacheOpType::Read)].failure);
   cont->handleEvent(CACHE_EVENT_OPEN_READ_FAILED, reinterpret_cast<void *>(-ECACHE_NO_DOC));
   return ACTION_RESULT_DONE;
-Lwriter:
-  cont->handleEvent(CACHE_EVENT_OPEN_READ_RWW, nullptr);
-  SET_CONTINUATION_HANDLER(c, &CacheVC::openReadFromWriter);
-  if (c->handleEvent(EVENT_IMMEDIATE, nullptr) == EVENT_DONE) {
-    return ACTION_RESULT_DONE;
-  }
-  return &c->_action;
 Lcallreturn:
   if (c->handleEvent(AIO_EVENT_DONE, nullptr) == EVENT_DONE) {
     return ACTION_RESULT_DONE;
diff --git a/src/iocore/cache/CacheDir.cc b/src/iocore/cache/CacheDir.cc
index 0cdb7251d1c..97532f85b47 100644
--- a/src/iocore/cache/CacheDir.cc
+++ b/src/iocore/cache/CacheDir.cc
@@ -28,12 +28,15 @@
 #include "PreservationTable.h"
 #include "Stripe.h"
+#include "iocore/eventsystem/Event.h"
 #include "tscore/hugepages.h"
 #include "tscore/Random.h"
 #include "ts/ats_probe.h"
 #include "iocore/eventsystem/Tasks.h"
 #include
+#include "tsutil/Bravo.h"
+#include
 
 #ifdef LOOP_CHECK_MODE
 #define DIR_LOOP_THRESHOLD 1000
@@ -66,7 +69,7 @@ ClassAllocator<OpenDirEntry> openDirEntryAllocator("openDirEntry");
 
 // OpenDir
 
-OpenDir::OpenDir()
+OpenDir::OpenDir(StripeSM *s) : Continuation(new_ProxyMutex()), _stripe(s)
 {
   SET_HANDLER(&OpenDir::signal_readers);
 }
@@ -81,10 +84,9 @@ OpenDir::OpenDir()
 int
 OpenDir::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
 {
-  ink_assert(cont->stripe->mutex->thread_holding == this_ethread());
   unsigned int h = cont->first_key.slice32(0);
   int b = h % OPEN_DIR_BUCKETS;
-  for (OpenDirEntry *d = bucket[b].head; d; d = d->link.next) {
+  for (OpenDirEntry *d = _bucket[b].head; d; d = d->link.next) {
     if (!(d->writers.head->first_key == cont->first_key)) {
       continue;
     }
@@ -110,48 +112,63 @@ OpenDir::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
   dir_clear(&od->first_dir);
   cont->od = od;
   cont->write_vector = &od->vector;
-  bucket[b].push(od);
+  _bucket[b].push(od);
   return 1;
 }
 
+/**
+  This event handler is called in two cases:
+
+  1. Direct call from OpenDir::close_write - the writer lock is already acquired
+  2. Self-retry through the event system - the writer lock still needs to be acquired
+ */
 int
-OpenDir::signal_readers(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
+OpenDir::signal_readers(int event, Event * /* e ATS_UNUSED */)
 {
-  Queue<CacheVC, Link_CacheVC_opendir_link> newly_delayed_readers;
-  EThread *t = mutex->thread_holding;
-  CacheVC *c = nullptr;
-  while ((c = delayed_readers.dequeue())) {
-    CACHE_TRY_LOCK(lock, c->mutex, t);
-    if (lock.is_locked()) {
-      c->f.open_read_timeout = 0;
-      c->handleEvent(EVENT_IMMEDIATE, nullptr);
-      continue;
+  auto write_op = [&] {
+    Queue<CacheVC, Link_CacheVC_opendir_link> newly_delayed_readers;
+    EThread *t = mutex->thread_holding;
+    CacheVC *c = nullptr;
+    while ((c = _delayed_readers.dequeue())) {
+      CACHE_TRY_LOCK(lock, c->mutex, t);
+      if (lock.is_locked()) {
+        c->f.open_read_timeout = 0;
+        c->handleEvent(EVENT_IMMEDIATE, nullptr);
+        continue;
+      }
+      newly_delayed_readers.push(c);
     }
-    newly_delayed_readers.push(c);
-  }
-  if (newly_delayed_readers.head) {
-    delayed_readers = newly_delayed_readers;
-    EThread *t1 = newly_delayed_readers.head->mutex->thread_holding;
-    if (!t1) {
-      t1 = mutex->thread_holding;
+    if (newly_delayed_readers.head) {
+      _delayed_readers = newly_delayed_readers;
+      EThread *t1 = newly_delayed_readers.head->mutex->thread_holding;
+      if (!t1) {
+        t1 = mutex->thread_holding;
+      }
+      t1->schedule_in(this, HRTIME_MSECONDS(cache_config_mutex_retry_delay), CACHE_EVENT_OPEN_DIR_RETRY);
    }
-    t1->schedule_in(this, HRTIME_MSECONDS(cache_config_mutex_retry_delay));
+  };
+
+  if (event == CACHE_EVENT_OPEN_DIR_RETRY) {
+    // self-retry comes from the event system
+    _stripe->write_op(write_op);
+  } else {
+    write_op();
   }
-  return 0;
+
+  return EVENT_DONE;
 }
 
 int
 OpenDir::close_write(CacheVC *cont)
 {
-  ink_assert(cont->stripe->mutex->thread_holding == this_ethread());
   cont->od->writers.remove(cont);
   cont->od->num_writers--;
   if (!cont->od->writers.head) {
     unsigned int h = cont->first_key.slice32(0);
     int b = h % OPEN_DIR_BUCKETS;
-    bucket[b].remove(cont->od);
-    delayed_readers.append(cont->od->readers);
-    signal_readers(0, nullptr);
+    _bucket[b].remove(cont->od);
+    _delayed_readers.append(cont->od->readers);
+    signal_readers(EVENT_CALL, nullptr);
     cont->od->vector.clear();
     THREAD_FREE(cont->od, openDirEntryAllocator, cont->mutex->thread_holding);
   }
@@ -164,7 +181,7 @@ OpenDir::open_read(const CryptoHash *key) const
 {
   unsigned int h = key->slice32(0);
   int b = h % OPEN_DIR_BUCKETS;
-  for (OpenDirEntry *d = bucket[b].head; d; d = d->link.next) {
+  for (OpenDirEntry *d = _bucket[b].head; d; d = d->link.next) {
     if (d->writers.head->first_key == *key) {
       return d;
     }
diff --git a/src/iocore/cache/P_CacheDir.h b/src/iocore/cache/P_CacheDir.h
index fbd2f0b25bc..21eef5b2340 100644
--- a/src/iocore/cache/P_CacheDir.h
+++ b/src/iocore/cache/P_CacheDir.h
@@ -29,6 +29,7 @@
 #include "iocore/aio/AIO.h"
 #include "tscore/Version.h"
 #include "tscore/hugepages.h"
+#include "tsutil/Bravo.h"
 
 #include
 #include
@@ -226,16 +227,28 @@ struct OpenDirEntry {
   }
 };
 
-struct OpenDir : public Continuation {
-  Queue<CacheVC, Link_CacheVC_opendir_link> delayed_readers;
-  DLL<OpenDirEntry> bucket[OPEN_DIR_BUCKETS];
+/**
+  Owned by StripeSM.
+  All access to this OpenDir requires a lock guard of StripeSM::_shared_mutex.
+ */
+class OpenDir : public Continuation
+{
+public:
+  OpenDir(StripeSM *s);
 
-  int open_write(CacheVC *c, int allow_if_writers, int max_writers);
-  int close_write(CacheVC *c);
+  // writer
+  int open_write(CacheVC *c, int allow_if_writers, int max_writers);
+  int close_write(CacheVC *c);
+
+  // reader
   OpenDirEntry *open_read(const CryptoHash *key) const;
-  int signal_readers(int event, Event *e);
-  OpenDir();
+
+  // event handler
+  int signal_readers(int event, Event *e);
+
+private:
+  Queue<CacheVC, Link_CacheVC_opendir_link> _delayed_readers;
+  DLL<OpenDirEntry> _bucket[OPEN_DIR_BUCKETS];
+
+  StripeSM *_stripe;
 };
 
 struct CacheSync : public Continuation {
diff --git a/src/iocore/cache/StripeSM.cc b/src/iocore/cache/StripeSM.cc
index 2e39fc30980..eb02f15fbd8 100644
--- a/src/iocore/cache/StripeSM.cc
+++ b/src/iocore/cache/StripeSM.cc
@@ -117,9 +117,9 @@ StripeSM::StripeSM(CacheDisk *disk, off_t blocks, off_t dir_skip, int avg_obj_si
     Stripe{disk, blocks, dir_skip, avg_obj_size, fragment_size},
     fd{disk->fd},
     disk{disk},
-    _preserved_dirs{len}
+    _preserved_dirs{len},
+    _open_dir(this)
 {
-  open_dir.mutex = this->mutex;
   SET_HANDLER(&StripeSM::aggWrite);
 }
 
@@ -1381,8 +1381,12 @@ StripeSM::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
     return ECACHE_WRITE_FAIL;
   }
 
-  if (open_dir.open_write(cont, allow_if_writers, max_writers)) {
-    return 0;
+  {
+    std::lock_guard lock(_shared_mutex);
+
+    if (_open_dir.open_write(cont, allow_if_writers, max_writers)) {
+      return 0;
+    }
   }
   return ECACHE_DOC_BUSY;
 }
@@ -1401,7 +1405,9 @@ StripeSM::open_write_lock(CacheVC *cont, int allow_if_writers, int max_writers)
 int
 StripeSM::close_write(CacheVC *cont)
 {
-  return open_dir.close_write(cont);
+  std::lock_guard lock(_shared_mutex);
+
+  return _open_dir.close_write(cont);
 }
 
 void
diff --git a/src/iocore/cache/StripeSM.h b/src/iocore/cache/StripeSM.h
index 40bb972babf..4505c3fbb3e 100644
--- a/src/iocore/cache/StripeSM.h
+++ b/src/iocore/cache/StripeSM.h
@@ -34,6 +34,7 @@
 #include "tscore/CryptoHash.h"
 #include "tscore/List.h"
+#include "tsutil/Bravo.h"
 
 #include
 
@@ -78,7 +79,6 @@ class StripeSM : public Continuation, public Stripe
 
   CacheDisk *disk{};
 
-  OpenDir open_dir;
   RamCache *ram_cache = nullptr;
   DLL<EvacuationBlock> lookaside[LOOKASIDE_SIZE];
   CacheEvacuateDocVC *doc_evacuator = nullptr;
 
@@ -102,14 +102,15 @@ class StripeSM : public Continuation, public Stripe
 
   int recover_data();
 
-  int open_write(CacheVC *cont, int allow_if_writers, int max_writers);
-  int open_write_lock(CacheVC *cont, int allow_if_writers, int max_writers);
-  int close_write(CacheVC *cont);
-  int begin_read(CacheVC *cont) const;
-  // unused read-write interlock code
-  // currently http handles a write-lock failure by retrying the read
+  // OpenDir API
+  int open_write(CacheVC *cont, int allow_if_writers, int max_writers);
+  int open_write_lock(CacheVC *cont, int allow_if_writers, int max_writers);
+  int close_write(CacheVC *cont);
   OpenDirEntry *open_read(const CryptoHash *key) const;
-  int close_read(CacheVC *cont) const;
+
+  // PreservationTable API
+  int begin_read(CacheVC *cont) const;
+  int close_read(CacheVC *cont) const;
 
   int clear_dir_aio();
   int clear_dir();
 
@@ -238,8 +239,21 @@ class StripeSM : public Continuation, public Stripe
     return this->_preserved_dirs;
   }
 
+  // shared_mutex lock guard helpers
+  template <typename U, typename Func> U read_op(Func) const;
+  template <typename U, typename Func> U write_op(Func);
+
 private:
-  mutable PreservationTable _preserved_dirs;
+  /**
+    Reader-Writer lock to cover OpenDir access.
+    For now, only OpenDir access is covered, but once we classify other functions as readers or writers, we can use this
+    shared_mutex for them as well to reduce the current heavy lock contention on StripeSM::mutex.
+   */
+  mutable ts::bravo::recursive_shared_mutex _shared_mutex;
+  mutable PreservationTable _preserved_dirs;
+
+  // All access to this OpenDir requires a _shared_mutex lock guard
+  OpenDir _open_dir;
 
   int _agg_copy(CacheVC *vc);
   int _copy_writer_to_aggregation(CacheVC *vc);
 
@@ -253,6 +267,24 @@ extern std::atomic<int> gnstripes;
 extern ClassAllocator<OpenDirEntry> openDirEntryAllocator;
 extern unsigned short *vol_hash_table;
 
+template <typename U, typename Func>
+U
+StripeSM::read_op(Func read_op) const
+{
+  ts::bravo::shared_lock lock(_shared_mutex);
+
+  return read_op();
+}
+
+template <typename U, typename Func>
+U
+StripeSM::write_op(Func write_op)
+{
+  std::lock_guard lock(_shared_mutex);
+
+  return write_op();
+}
+
 // inline Functions
 
 inline void
@@ -267,7 +299,9 @@ StripeSM::cancel_trigger()
 inline OpenDirEntry *
 StripeSM::open_read(const CryptoHash *key) const
 {
-  return open_dir.open_read(key);
+  ts::bravo::shared_lock lock(_shared_mutex);
+
+  return _open_dir.open_read(key);
 }
 
 inline int
diff --git a/src/tsutil/unit_tests/test_Bravo.cc b/src/tsutil/unit_tests/test_Bravo.cc
index 1a825cd9460..ece2aa783d9 100644
--- a/src/tsutil/unit_tests/test_Bravo.cc
+++ b/src/tsutil/unit_tests/test_Bravo.cc
@@ -227,3 +227,543 @@ TEST_CASE("BRAVO - check with race", "[libts][BRAVO]")
     CHECK(i == 2);
   }
 }
+
+TEST_CASE("Recursive BRAVO - exclusive lock", "[libts][BRAVO]")
+{
+  SECTION("single lock/unlock")
+  {
+    ts::bravo::recursive_shared_mutex mutex;
+    mutex.lock();
+    mutex.unlock();
+  }
+
+  SECTION("recursive lock/unlock")
+  {
+    ts::bravo::recursive_shared_mutex mutex;
+    mutex.lock();
+    mutex.lock();
+    mutex.lock();
+    mutex.unlock();
+    mutex.unlock();
+    mutex.unlock();
+  }
+
+  SECTION("try_lock by owner succeeds")
+  {
+    ts::bravo::recursive_shared_mutex mutex;
+    mutex.lock();
+    CHECK(mutex.try_lock() == true);
+    mutex.unlock();
+    mutex.unlock();
+  }
+
+  SECTION("try_lock by non-owner fails")
+  {
+    ts::bravo::recursive_shared_mutex mutex;
+    mutex.lock();
+
+    std::thread t{[&mutex]() { CHECK(mutex.try_lock() == false); }};
+    t.join();
+
+    mutex.unlock();
+  }
+
+  SECTION("recursive try_lock")
+  {
+    ts::bravo::recursive_shared_mutex mutex;
+    CHECK(mutex.try_lock() == true);
+    CHECK(mutex.try_lock() == true);
+    CHECK(mutex.try_lock() == true);
+    mutex.unlock();
+    mutex.unlock();
+    mutex.unlock();
+  }
+
+  SECTION("writer-writer blocking")
+  {
+    ts::bravo::recursive_shared_mutex mutex;
+    int i = 0;
+
+    std::thread t1{[&]() {
+      std::lock_guard lock(mutex);
+      std::this_thread::sleep_for(100ms);
+      CHECK(++i == 1);
+    }};
+
+    std::thread t2{[&]() {
+      std::this_thread::sleep_for(50ms);
+      std::lock_guard lock(mutex);
+      CHECK(++i == 2);
+    }};
+
+    t1.join();
+    t2.join();
+
+    CHECK(i == 2);
+  }
+}
+
+TEST_CASE("Recursive BRAVO - shared lock", "[libts][BRAVO]")
+{
+  SECTION("single shared lock/unlock")
+  {
+    ts::bravo::recursive_shared_mutex mutex;
+    ts::bravo::Token token{0};
+    mutex.lock_shared(token);
+    mutex.unlock_shared(token);
+  }
+
+  SECTION("recursive shared lock/unlock")
+  {
+    ts::bravo::recursive_shared_mutex mutex;
+    ts::bravo::Token token1{0};
+    ts::bravo::Token token2{0};
+    ts::bravo::Token token3{0};
+    mutex.lock_shared(token1);
+    mutex.lock_shared(token2);
+    mutex.lock_shared(token3);
+    // All tokens should be the same (cached)
+    CHECK(token1 == token2);
+    CHECK(token2 == token3);
+    mutex.unlock_shared(token3);
+    mutex.unlock_shared(token2);
+    mutex.unlock_shared(token1);
+  }
+
+ SECTION("try_lock_shared recursive") + { + ts::bravo::recursive_shared_mutex mutex; + ts::bravo::Token token1{0}; + ts::bravo::Token token2{0}; + CHECK(mutex.try_lock_shared(token1) == true); + CHECK(mutex.try_lock_shared(token2) == true); + CHECK(token1 == token2); + mutex.unlock_shared(token2); + mutex.unlock_shared(token1); + } + + SECTION("multiple readers concurrent") + { + ts::bravo::recursive_shared_mutex mutex; + int i = 0; + + std::thread t1{[&]() { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + CHECK(i == 0); + std::this_thread::sleep_for(50ms); + mutex.unlock_shared(token); + }}; + + std::thread t2{[&]() { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + CHECK(i == 0); + std::this_thread::sleep_for(50ms); + mutex.unlock_shared(token); + }}; + + t1.join(); + t2.join(); + + CHECK(i == 0); + } + + SECTION("shared blocks exclusive") + { + ts::bravo::recursive_shared_mutex mutex; + ts::bravo::Token token{0}; + mutex.lock_shared(token); + + std::thread t{[&mutex]() { CHECK(mutex.try_lock() == false); }}; + t.join(); + + mutex.unlock_shared(token); + } + + SECTION("exclusive blocks shared") + { + ts::bravo::recursive_shared_mutex mutex; + mutex.lock(); + + std::thread t{[&mutex]() { + ts::bravo::Token token{0}; + CHECK(mutex.try_lock_shared(token) == false); + }}; + t.join(); + + mutex.unlock(); + } +} + +TEST_CASE("Recursive BRAVO - mixed lock scenarios", "[libts][BRAVO]") +{ + SECTION("downgrade: exclusive owner can acquire shared lock") + { + ts::bravo::recursive_shared_mutex mutex; + mutex.lock(); + + // While holding exclusive lock, we can acquire shared lock + ts::bravo::Token token{0}; + mutex.lock_shared(token); + CHECK(token == 0); // Special token for downgrade + + mutex.unlock_shared(token); + mutex.unlock(); + } + + SECTION("downgrade: try_lock_shared succeeds for exclusive owner") + { + ts::bravo::recursive_shared_mutex mutex; + mutex.lock(); + + ts::bravo::Token token{0}; + CHECK(mutex.try_lock_shared(token) == true); + CHECK(token == 0); // Special token for downgrade + + mutex.unlock_shared(token); + mutex.unlock(); + } + + SECTION("upgrade prevention: try_lock fails when holding shared lock") + { + ts::bravo::recursive_shared_mutex mutex; + ts::bravo::Token token{0}; + mutex.lock_shared(token); + + // Cannot upgrade: try_lock should fail + CHECK(mutex.try_lock() == false); + + mutex.unlock_shared(token); + } + + SECTION("downgrade with multiple shared locks") + { + ts::bravo::recursive_shared_mutex mutex; + mutex.lock(); + + ts::bravo::Token token1{0}; + ts::bravo::Token token2{0}; + mutex.lock_shared(token1); + mutex.lock_shared(token2); + + mutex.unlock_shared(token2); + mutex.unlock_shared(token1); + mutex.unlock(); + } + + SECTION("proper unlock ordering: shared then exclusive") + { + ts::bravo::recursive_shared_mutex mutex; + mutex.lock(); + + ts::bravo::Token token{0}; + mutex.lock_shared(token); + + // Unlock shared first, then exclusive + mutex.unlock_shared(token); + mutex.unlock(); + + // Mutex should be fully unlocked now + CHECK(mutex.try_lock() == true); + mutex.unlock(); + } + + SECTION("nested exclusive locks with shared in between") + { + ts::bravo::recursive_shared_mutex mutex; + mutex.lock(); + mutex.lock(); // Recursive exclusive + + ts::bravo::Token token{0}; + mutex.lock_shared(token); + + mutex.unlock_shared(token); + mutex.unlock(); // Second exclusive + mutex.unlock(); // First exclusive + + // Mutex should be fully unlocked now + CHECK(mutex.try_lock() == true); + mutex.unlock(); + } +} + +TEST_CASE("Recursive BRAVO - 
BRAVO optimizations", "[libts][BRAVO]") +{ + SECTION("first shared lock gets token from underlying BRAVO mutex") + { + ts::bravo::recursive_shared_mutex mutex; + ts::bravo::Token token{0}; + mutex.lock_shared(token); + // Token should be set by underlying BRAVO mutex (0 = slow path, >0 = fast path) + // We can't guarantee which path is taken, but the lock should succeed + mutex.unlock_shared(token); + } + + SECTION("recursive shared locks reuse cached token") + { + ts::bravo::recursive_shared_mutex mutex; + ts::bravo::Token token1{0}; + ts::bravo::Token token2{0}; + ts::bravo::Token token3{0}; + + mutex.lock_shared(token1); + mutex.lock_shared(token2); + mutex.lock_shared(token3); + + // All tokens should be identical (cached from first lock) + CHECK(token1 == token2); + CHECK(token2 == token3); + + mutex.unlock_shared(token3); + mutex.unlock_shared(token2); + mutex.unlock_shared(token1); + } + + SECTION("writer revocation then reader works") + { + ts::bravo::recursive_shared_mutex mutex; + + // First, acquire and release a shared lock to enable read_bias + { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + mutex.unlock_shared(token); + } + + // Writer acquires lock (triggers revocation) + mutex.lock(); + mutex.unlock(); + + // Reader should still work after writer releases + { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + mutex.unlock_shared(token); + } + } + + SECTION("multiple readers then writer then readers") + { + ts::bravo::recursive_shared_mutex mutex; + std::atomic readers_done{0}; + + // Start multiple readers + std::thread t1{[&]() { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + std::this_thread::sleep_for(50ms); + mutex.unlock_shared(token); + ++readers_done; + }}; + + std::thread t2{[&]() { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + std::this_thread::sleep_for(50ms); + mutex.unlock_shared(token); + ++readers_done; + }}; + + // Wait for readers to finish + t1.join(); + t2.join(); + CHECK(readers_done == 2); + + // Writer acquires lock + mutex.lock(); + mutex.unlock(); + + // More readers after writer + std::thread t3{[&]() { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + mutex.unlock_shared(token); + ++readers_done; + }}; + + std::thread t4{[&]() { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + mutex.unlock_shared(token); + ++readers_done; + }}; + + t3.join(); + t4.join(); + CHECK(readers_done == 4); + } + + SECTION("recursive shared lock with concurrent writer") + { + ts::bravo::recursive_shared_mutex mutex; + std::atomic writer_done{false}; + + // Reader thread with recursive locks + std::thread reader{[&]() { + ts::bravo::Token token1{0}; + ts::bravo::Token token2{0}; + mutex.lock_shared(token1); + mutex.lock_shared(token2); // Recursive + CHECK(token1 == token2); // Should be same cached token + std::this_thread::sleep_for(100ms); + mutex.unlock_shared(token2); + mutex.unlock_shared(token1); + }}; + + // Writer thread tries to acquire after reader starts + std::thread writer{[&]() { + std::this_thread::sleep_for(50ms); + mutex.lock(); + writer_done = true; + mutex.unlock(); + }}; + + reader.join(); + writer.join(); + CHECK(writer_done == true); + } +} + +TEST_CASE("Recursive BRAVO - stress test", "[libts][BRAVO]") +{ + SECTION("concurrent readers with recursive locks") + { + ts::bravo::recursive_shared_mutex mutex; + std::atomic counter{0}; + constexpr int NUM_THREADS = 8; + constexpr int NUM_ITERATIONS = 1000; + + std::vector threads; + for (int i = 0; i < NUM_THREADS; ++i) { + 
threads.emplace_back([&]() { + for (int j = 0; j < NUM_ITERATIONS; ++j) { + ts::bravo::Token token1{0}; + ts::bravo::Token token2{0}; + mutex.lock_shared(token1); + mutex.lock_shared(token2); // Recursive + ++counter; + mutex.unlock_shared(token2); + mutex.unlock_shared(token1); + } + }); + } + + for (auto &t : threads) { + t.join(); + } + + CHECK(counter == NUM_THREADS * NUM_ITERATIONS); + } + + SECTION("concurrent writers with recursive locks") + { + ts::bravo::recursive_shared_mutex mutex; + int counter = 0; + constexpr int NUM_THREADS = 4; + constexpr int NUM_ITERATIONS = 500; + + std::vector threads; + for (int i = 0; i < NUM_THREADS; ++i) { + threads.emplace_back([&]() { + for (int j = 0; j < NUM_ITERATIONS; ++j) { + mutex.lock(); + mutex.lock(); // Recursive + ++counter; + mutex.unlock(); + mutex.unlock(); + } + }); + } + + for (auto &t : threads) { + t.join(); + } + + CHECK(counter == NUM_THREADS * NUM_ITERATIONS); + } + + SECTION("mixed readers and writers") + { + ts::bravo::recursive_shared_mutex mutex; + std::atomic read_counter{0}; + int write_counter = 0; + constexpr int NUM_READERS = 6; + constexpr int NUM_WRITERS = 2; + constexpr int NUM_ITERATIONS = 500; + + std::vector threads; + + // Reader threads + for (int i = 0; i < NUM_READERS; ++i) { + threads.emplace_back([&]() { + for (int j = 0; j < NUM_ITERATIONS; ++j) { + ts::bravo::Token token{0}; + mutex.lock_shared(token); + ++read_counter; + mutex.unlock_shared(token); + } + }); + } + + // Writer threads + for (int i = 0; i < NUM_WRITERS; ++i) { + threads.emplace_back([&]() { + for (int j = 0; j < NUM_ITERATIONS; ++j) { + mutex.lock(); + ++write_counter; + mutex.unlock(); + } + }); + } + + for (auto &t : threads) { + t.join(); + } + + CHECK(read_counter == NUM_READERS * NUM_ITERATIONS); + CHECK(write_counter == NUM_WRITERS * NUM_ITERATIONS); + } + + SECTION("recursive mixed locks under contention") + { + ts::bravo::recursive_shared_mutex mutex; + std::atomic counter{0}; + constexpr int NUM_THREADS = 4; + constexpr int NUM_ITERATIONS = 200; + + std::vector threads; + for (int i = 0; i < NUM_THREADS; ++i) { + threads.emplace_back([&, i]() { + for (int j = 0; j < NUM_ITERATIONS; ++j) { + if (i % 2 == 0) { + // Even threads: exclusive with downgrade + mutex.lock(); + mutex.lock(); // Recursive exclusive + ts::bravo::Token token{0}; + mutex.lock_shared(token); // Downgrade + ++counter; + mutex.unlock_shared(token); + mutex.unlock(); + mutex.unlock(); + } else { + // Odd threads: shared recursive + ts::bravo::Token token1{0}; + ts::bravo::Token token2{0}; + mutex.lock_shared(token1); + mutex.lock_shared(token2); + ++counter; + mutex.unlock_shared(token2); + mutex.unlock_shared(token1); + } + } + }); + } + + for (auto &t : threads) { + t.join(); + } + + CHECK(counter == NUM_THREADS * NUM_ITERATIONS); + } +}
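
Usage note (reviewer sketch, not part of the patch): the snippet below illustrates the read_op/write_op pattern that StripeSM uses above to funnel OpenDir access through its reader-writer lock. "MiniStripe", its "_open_writers" member, and "example()" are hypothetical stand-ins, and the template parameter order <typename U, typename Func> is reconstructed from the garbled hunk, so treat the signatures as assumptions; only the locking pattern itself (ts::bravo::shared_lock for readers, std::lock_guard on the recursive_shared_mutex for writers) mirrors the patch.

// Illustrative sketch only -- "MiniStripe" is a hypothetical stand-in for StripeSM.
#include "tsutil/Bravo.h"

#include <mutex>

struct MiniStripe {
  mutable ts::bravo::recursive_shared_mutex _shared_mutex;
  int _open_writers{0}; // hypothetical stand-in for OpenDir state

  // Run a callable under the shared (reader) lock, as StripeSM::read_op does in the patch.
  template <typename U, typename Func>
  U
  read_op(Func f) const
  {
    ts::bravo::shared_lock lock(_shared_mutex);
    return f();
  }

  // Run a callable under the exclusive (writer) lock, as StripeSM::write_op does in the patch.
  template <typename U, typename Func>
  U
  write_op(Func f)
  {
    std::lock_guard lock(_shared_mutex);
    return f();
  }
};

// Example call sites: a reader-side query followed by a writer-side update.
int
example(MiniStripe &stripe)
{
  bool busy = stripe.read_op<bool>([&]() -> bool { return stripe._open_writers > 0; });
  return stripe.write_op<int>([&]() -> int { return busy ? stripe._open_writers : ++stripe._open_writers; });
}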