author     Costa Tsaousis <costa@netdata.cloud>    2023-01-12 00:41:12 +0200
committer  GitHub <noreply@github.com>             2023-01-12 00:41:12 +0200
commit     a844215ac9adf197397b23c7bcd884079940249b (patch)
tree       8c9d56c5cca9a83c814b771fcb4e117970671170 /database
parent     3a77de1f54b152e677f9261bfdf38532e0151f72 (diff)
allow the cache to grow when huge queries are running that exceed the cache size (#14247)
* allow the cache to grow when huge queries are running that exceed the size of the cache
* queue preloading of queries
* finalize prepared queries on timeout
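For readers skimming the patch below, here is a minimal, standalone sketch of the sizing rule this commit introduces. It mirrors the shape of cache_usage_per1000() in database/engine/cache.c, but the cache_counters struct, the autoscale flag and the helper function are simplified stand-ins invented for illustration, not the real PGC types or API.

#include <stddef.h>

#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

/* Hypothetical, flattened view of the counters the patch reads. */
typedef struct {
    size_t hot;          /* currently hot (collected) pages        */
    size_t hot_max;      /* high watermark of hot pages            */
    size_t dirty;        /* pages waiting to be flushed            */
    size_t dirty_max;    /* high watermark of dirty pages          */
    size_t clean;        /* configured clean-page budget           */
    size_t referenced;   /* pages referenced by running queries    */
} cache_counters;

/* Simplified version of the target-size calculation after this patch. */
static size_t wanted_cache_size(const cache_counters *c, int autoscale) {
    size_t wanted;

    if (autoscale) {
        /* our promise to users */
        size_t max_size1 = MAX(c->hot_max, c->hot) * 2;

        /* protection against slow flushing */
        size_t max_size2 = c->hot_max +
            ((c->dirty_max < c->hot_max / 2) ? c->hot_max / 2 : c->dirty_max * 2);

        wanted = MIN(max_size1, max_size2);

        if (wanted < c->hot + c->dirty + c->clean)
            wanted = c->hot + c->dirty + c->clean;
    }
    else
        wanted = c->hot + c->dirty + c->clean;

    /* the new part: if running queries reference more than the target allows,
       let the cache grow to at least 2/3 of the referenced size */
    if (wanted < c->referenced * 2 / 3)
        wanted = c->referenced * 2 / 3;

    return wanted;
}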
Diffstat (limited to 'database')
-rw-r--r--  database/engine/cache.c  23
1 file changed, 16 insertions, 7 deletions
diff --git a/database/engine/cache.c b/database/engine/cache.c
index 9226672598..1ce4f87e1c 100644
--- a/database/engine/cache.c
+++ b/database/engine/cache.c
@@ -273,14 +273,20 @@ static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
     size_t wanted_cache_size;
     size_t per1000;
 
+    size_t dirty = __atomic_load_n(&cache->dirty.stats->size, __ATOMIC_RELAXED);
+    size_t hot = __atomic_load_n(&cache->hot.stats->size, __ATOMIC_RELAXED);
+
     if(cache->config.options & PGC_OPTIONS_AUTOSCALE) {
         size_t dirty_max = __atomic_load_n(&cache->dirty.stats->max_size, __ATOMIC_RELAXED);
         size_t hot_max = __atomic_load_n(&cache->hot.stats->max_size, __ATOMIC_RELAXED);
-        size_t dirty = __atomic_load_n(&cache->dirty.stats->size, __ATOMIC_RELAXED);
-        size_t hot = __atomic_load_n(&cache->hot.stats->size, __ATOMIC_RELAXED);
 
+        // our promise to users
         size_t max_size1 = MAX(hot_max, hot) * 2;
+
+        // protection against slow flushing
         size_t max_size2 = hot_max + ((dirty_max < hot_max / 2) ? hot_max / 2 : dirty_max * 2);
+
+        // the final wanted cache size
         wanted_cache_size = MIN(max_size1, max_size2);
 
         if(cache->config.dynamic_target_size_cb) {
@@ -292,12 +298,15 @@ static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
         if (wanted_cache_size < hot + dirty + cache->config.clean_size)
             wanted_cache_size = hot + dirty + cache->config.clean_size;
     }
-    else {
-        size_t dirty = __atomic_load_n(&cache->dirty.stats->size, __ATOMIC_RELAXED);
-        size_t hot = __atomic_load_n(&cache->hot.stats->size, __ATOMIC_RELAXED);
-
+    else
         wanted_cache_size = hot + dirty + cache->config.clean_size;
-    }
+
+    // protection against huge queries
+    // if huge queries are running, or huge amounts need to be saved
+    // allow the cache to grow more (hot pages in main cache are also referenced)
+    size_t referenced_size = __atomic_load_n(&cache->stats.referenced_size, __ATOMIC_RELAXED);
+    if(unlikely(wanted_cache_size < referenced_size * 2 / 3))
+        wanted_cache_size = referenced_size * 2 / 3;
 
     current_cache_size = __atomic_load_n(&cache->stats.size, __ATOMIC_RELAXED);
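To see what the last hunk changes in practice, here is a hypothetical run of the sketch shown above the diffstat, with made-up numbers in MiB: with autoscaling off, hot=512, dirty=0 and a clean budget of 1024 would normally target 1536 MiB, but queries referencing 6144 MiB of pages lift the target to 4096 MiB, i.e. two thirds of the referenced size.

#include <stdio.h>

int main(void) {
    /* made-up numbers, in MiB, chosen only to exercise the new floor */
    cache_counters c = {
        .hot = 512, .hot_max = 512,
        .dirty = 0, .dirty_max = 0,
        .clean = 1024,
        .referenced = 6144,   /* a huge query set referencing 6 GiB of pages */
    };

    /* before this patch: 512 + 0 + 1024 = 1536 MiB
       after this patch:  raised to 6144 * 2 / 3 = 4096 MiB */
    printf("wanted cache size: %zu MiB\n", wanted_cache_size(&c, 0));
    return 0;
}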