author     Costa Tsaousis <costa@netdata.cloud>    2023-06-20 11:21:06 +0300
committer  GitHub <noreply@github.com>             2023-06-20 11:21:06 +0300
commit     554855bd5c7c1d02e87a2cd5e23f3084af6607f6
tree       bf9d4d4b82f77ad277ad46363a860afb965a0a2a
parent     7348c85c18b03f557487c3c976fa773f593e47c5
RW_SPINLOCK: recursive readers support (#15217)
Readers should be able to recursively acquire the lock, even when there is a writer waiting.
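
In practice, a thread that already holds the read lock may take it again while a writer is spinning for exclusive access. A minimal sketch of the pattern this commit makes safe, assuming the RW_SPINLOCK API declared in libnetdata/locks/locks.h (registry_lock, dump_item() and lookup_item() are hypothetical names used only for illustration):

    #include "libnetdata/libnetdata.h"

    static RW_SPINLOCK registry_lock;

    // hypothetical helper that protects itself with its own read lock
    static void dump_item(void) {
        rw_spinlock_read_lock(&registry_lock);    // recursive acquisition
        // ... inspect shared state ...
        rw_spinlock_read_unlock(&registry_lock);
    }

    static void lookup_item(void) {
        rw_spinlock_read_lock(&registry_lock);    // outer read lock
        dump_item();    // re-enters the read lock; before this commit, a
                        // writer already waiting in rw_spinlock_write_lock()
                        // would deadlock this call
        rw_spinlock_read_unlock(&registry_lock);
    }

    int main(void) {
        rw_spinlock_init(&registry_lock);
        lookup_item();
        return 0;
    }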
Diffstat (limited to 'libnetdata')
-rw-r--r--  libnetdata/locks/locks.c  17
1 file changed, 12 insertions, 5 deletions
diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c
index 4061ba877e..0c02152f79 100644
--- a/libnetdata/locks/locks.c
+++ b/libnetdata/locks/locks.c
@@ -378,13 +378,20 @@ void rw_spinlock_read_unlock(RW_SPINLOCK *rw_spinlock) {
 void rw_spinlock_write_lock(RW_SPINLOCK *rw_spinlock) {
     static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 };
 
-    spinlock_lock(&rw_spinlock->spinlock);
-    size_t count = 0;
-    while (__atomic_load_n(&rw_spinlock->readers, __ATOMIC_RELAXED) > 0) {
+    size_t spins = 0;
+    while(1) {
+        spins++;
+        spinlock_lock(&rw_spinlock->spinlock);
+
+        if(__atomic_load_n(&rw_spinlock->readers, __ATOMIC_RELAXED) == 0)
+            break;
+
         // Busy wait until all readers have released their locks.
-        if(++count > 1000)
-            nanosleep(&ns, NULL);
+        spinlock_unlock(&rw_spinlock->spinlock);
+        nanosleep(&ns, NULL);
     }
+
+    (void)spins;
 }
 
 void rw_spinlock_write_unlock(RW_SPINLOCK *rw_spinlock) {
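
Why this fixes recursion: rw_spinlock_read_lock() has to pass through the same internal spinlock to register itself in the readers counter, roughly as in the paraphrase below (an illustration based on the surrounding file, not part of this hunk). Previously the writer held that spinlock for the entire busy-wait, so a thread already counted in readers could never re-enter the read path, and the readers count could never drain. The new loop releases the spinlock and sleeps 1 ns after every failed check, opening a window for recursive readers to get through; (void)spins merely silences the unused-variable warning for the retained spin counter.

    // a paraphrase of the reader path, assuming the structure used elsewhere in locks.c
    void rw_spinlock_read_lock(RW_SPINLOCK *rw_spinlock) {
        spinlock_lock(&rw_spinlock->spinlock);      // blocks while a writer holds it
        __atomic_add_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED);
        spinlock_unlock(&rw_spinlock->spinlock);    // readers never hold it while working
    }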