From 0cd888438f4d67c1258eabf4c1b825df6ad565a4 Mon Sep 17 00:00:00 2001 From: parmaster Date: Thu, 27 Jun 2024 01:11:06 +0300 Subject: [PATCH] memory usage optimization, deep cleanup, WithSize option --- main.go | 30 ++++++++++++++++------- main_test.go | 68 ++++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 82 insertions(+), 16 deletions(-) diff --git a/main.go b/main.go index 77833e8..108f902 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,5 @@ // Package provides simple, fast, thread-safe in-memory cache with by-key TTL expiration. -// Supporting generic types. +// Supporting generic value types. package mcache import ( @@ -23,7 +23,8 @@ type CacheItem[T any] struct { // Cache is a struct for cache. type Cache[T any] struct { - data map[string]CacheItem[T] + initialSize int + data map[string]*CacheItem[T] sync.RWMutex } @@ -40,7 +41,7 @@ type Cacher[T any] interface { // NewCache is a constructor for Cache. func NewCache[T any](options ...func(*Cache[T])) *Cache[T] { c := &Cache[T]{ - data: make(map[string]CacheItem[T]), + data: make(map[string]*CacheItem[T]), } for _, option := range options { @@ -79,7 +80,7 @@ func (c *Cache[T]) Set(key string, value T, ttl time.Duration) error { expiration = time.Now().Add(ttl) } - c.data[key] = CacheItem[T]{ + c.data[key] = &CacheItem[T]{ value: value, expiration: expiration, } @@ -150,20 +151,22 @@ func (c *Cache[T]) Del(key string) error { // Clears cache by replacing it with a clean one. func (c *Cache[T]) Clear() error { c.Lock() - c.data = make(map[string]CacheItem[T]) + c.data = make(map[string]*CacheItem[T], c.initialSize) c.Unlock() return nil } -// Cleanup deletes expired keys from cache. +// Cleanup deletes expired keys from cache by copying non-expired keys to a new map. 
func (c *Cache[T]) Cleanup() { c.Lock() + defer c.Unlock() + data := make(map[string]*CacheItem[T], c.initialSize) for k, v := range c.data { - if v.expired() { - delete(c.data, k) + if !v.expired() { + data[k] = v } } - c.Unlock() + c.data = data } // WithCleanup is a functional option for setting interval to run Cleanup goroutine. @@ -177,3 +180,12 @@ func WithCleanup[T any](ttl time.Duration) func(*Cache[T]) { }() } } + +// WithSize is a functional option for setting the cache's initial size, so the map won't grow dynamically: +// Go will allocate an appropriate number of buckets up front. +func WithSize[T any](size int) func(*Cache[T]) { + return func(c *Cache[T]) { + c.data = make(map[string]*CacheItem[T], size) + c.initialSize = size + } +} diff --git a/main_test.go b/main_test.go index 5f27282..d0ad2f0 100644 --- a/main_test.go +++ b/main_test.go @@ -2,6 +2,10 @@ package mcache import ( "fmt" + "log" + "runtime" + "strconv" + "sync" "sync/atomic" "testing" "time" @@ -122,10 +126,12 @@ func TestConcurrentSetAndGet(t *testing.T) { // Start multiple goroutines to concurrently set and get values numGoroutines := 10000 - done := make(chan bool) + wg := sync.WaitGroup{} + wg.Add(numGoroutines) for i := 0; i < numGoroutines; i++ { go func(index int) { + defer wg.Done() key := fmt.Sprintf("key-%d", index) value := fmt.Sprintf("value-%d", index) @@ -142,15 +148,10 @@ func TestConcurrentSetAndGet(t *testing.T) { if result != value { t.Errorf("Expected value %s for key %s, but got %s", value, key, result) } - - done <- true }(i) } - // Wait for all goroutines to finish - for i := 0; i < numGoroutines; i++ { - <-done - } + wg.Wait() } // catching the situation when the key is deleted before the value is retrieved @@ -191,6 +192,59 @@ func TestWithCleanup(t *testing.T) { } } +func getAlloc() uint64 { + var m runtime.MemStats + runtime.ReadMemStats(&m) + return m.Alloc +} + +func printAlloc(message string) { + log.Printf("%s %d KB\n", message, getAlloc()/1024) +} + +func TestWithSize(t 
*testing.T) { + size := 10_000 + printAlloc("Before") + cache := NewCache(WithSize[string](size)) + memBefore := getAlloc() + for iter := 0; iter < 10; iter++ { + printAlloc("After NewCache") + + for i := 0; i < size; i++ { + key := fmt.Sprintf("key-%d", i) + value := fmt.Sprintf("value-%d", i) + + err := cache.Set(key, value, time.Second) + assert.NoError(t, err) + } + printAlloc("After Set " + strconv.Itoa(size) + " entries") + + cache.Cleanup() + printAlloc("After Cleanup") + + // NOTE(review): "key_1" (underscore) was never stored — only "key-%d" keys exist — so Get must error here; this does not verify Cleanup deleted anything + _, err := cache.Get("key_1") + assert.Error(t, err, "Expected the key to be deleted") + time.Sleep(time.Second) + } + runtime.GC() // force GC to clean up the cache, make sure it's not leaking + printAlloc("After") + memAfter := getAlloc() + assert.Less(t, memAfter, memBefore*2, "Memory usage should not grow more than twice") +} + +func TestThreadSafeCleanup(t *testing.T) { + cache := NewCache[string]() + for i := 0; i < 100; i++ { + cache.Set("key_"+strconv.Itoa(i), "value", time.Duration(10)*time.Millisecond) + go cache.Cleanup() + cache.Set("key_"+strconv.Itoa(i), "value", time.Duration(20)*time.Millisecond) + go cache.Cleanup() + cache.Set("key_"+strconv.Itoa(i), "value", time.Duration(30)*time.Millisecond) + go cache.Cleanup() + } +} + func TestMain(m *testing.M) { // Enable the race detector m.Run()