Skip to content

Commit

Permalink
memory usage optimization, deep cleanup, WithSize option
Browse files Browse the repository at this point in the history
  • Loading branch information
parMaster committed Jun 26, 2024
1 parent ad04285 commit 0cd8884
Show file tree
Hide file tree
Showing 2 changed files with 82 additions and 16 deletions.
30 changes: 21 additions & 9 deletions main.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
// Package mcache provides a simple, fast, thread-safe in-memory cache with by-key TTL expiration.
// Supporting generic types.
// It supports generic value types.
package mcache

import (
Expand All @@ -23,7 +23,8 @@ type CacheItem[T any] struct {

// Cache is a generic, thread-safe in-memory cache keyed by string.
// The zero value is not usable; construct instances with NewCache.
type Cache[T any] struct {
	// initialSize is remembered so Clear and Cleanup can presize
	// replacement maps to the capacity requested via WithSize.
	initialSize int
	// data maps keys to cached items, stored by pointer.
	data map[string]*CacheItem[T]
	// NOTE(review): embedding RWMutex exports Lock/RLock/Unlock on the
	// public API; a named unexported field would hide them, but changing
	// it now would break callers relying on the embedded methods.
	sync.RWMutex
}

Expand All @@ -40,7 +41,7 @@ type Cacher[T any] interface {
// NewCache is a constructor for Cache.
func NewCache[T any](options ...func(*Cache[T])) *Cache[T] {
c := &Cache[T]{
data: make(map[string]CacheItem[T]),
data: make(map[string]*CacheItem[T]),
}

for _, option := range options {
Expand Down Expand Up @@ -79,7 +80,7 @@ func (c *Cache[T]) Set(key string, value T, ttl time.Duration) error {
expiration = time.Now().Add(ttl)
}

c.data[key] = CacheItem[T]{
c.data[key] = &CacheItem[T]{
value: value,
expiration: expiration,
}
Expand Down Expand Up @@ -150,20 +151,22 @@ func (c *Cache[T]) Del(key string) error {
// Clear drops all entries by swapping in a fresh map presized to the
// configured initial size. It never fails; the error return is kept for
// interface compatibility and is always nil.
func (c *Cache[T]) Clear() error {
	c.Lock()
	c.data = make(map[string]*CacheItem[T], c.initialSize)
	c.Unlock()
	return nil
}

// Cleanup deletes expired keys from cache.
// Cleanup deletes expired keys from cache by copying non-expired keys to a new map.
func (c *Cache[T]) Cleanup() {
c.Lock()
defer c.Unlock()
data := make(map[string]*CacheItem[T], c.initialSize)
for k, v := range c.data {
if v.expired() {
delete(c.data, k)
if !v.expired() {
data[k] = v
}
}
c.Unlock()
c.data = data
}

// WithCleanup is a functional option for setting interval to run Cleanup goroutine.
Expand All @@ -177,3 +180,12 @@ func WithCleanup[T any](ttl time.Duration) func(*Cache[T]) {
}()
}
}

// WithSize is a functional option that presizes the cache for the given
// number of entries, so Go allocates enough map buckets up front instead
// of growing the map dynamically. The size is also remembered for maps
// created later by Clear and Cleanup.
func WithSize[T any](size int) func(*Cache[T]) {
	return func(c *Cache[T]) {
		c.initialSize = size
		c.data = make(map[string]*CacheItem[T], size)
	}
}
68 changes: 61 additions & 7 deletions main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,10 @@ package mcache

import (
"fmt"
"log"
"runtime"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
Expand Down Expand Up @@ -122,10 +126,12 @@ func TestConcurrentSetAndGet(t *testing.T) {

// Start multiple goroutines to concurrently set and get values
numGoroutines := 10000
done := make(chan bool)
wg := sync.WaitGroup{}
wg.Add(numGoroutines)

for i := 0; i < numGoroutines; i++ {
go func(index int) {
defer wg.Done()
key := fmt.Sprintf("key-%d", index)
value := fmt.Sprintf("value-%d", index)

Expand All @@ -142,15 +148,10 @@ func TestConcurrentSetAndGet(t *testing.T) {
if result != value {
t.Errorf("Expected value %s for key %s, but got %s", value, key, result)
}

done <- true
}(i)
}

// Wait for all goroutines to finish
for i := 0; i < numGoroutines; i++ {
<-done
}
wg.Wait()
}

// catching the situation when the key is deleted before the value is retrieved
Expand Down Expand Up @@ -191,6 +192,59 @@ func TestWithCleanup(t *testing.T) {
}
}

// getAlloc reports the number of bytes currently allocated on the heap.
func getAlloc() uint64 {
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	return stats.Alloc
}

// printAlloc logs the given message alongside the current heap usage in KB.
func printAlloc(message string) {
	kb := getAlloc() / 1024
	log.Printf("%s %d KB\n", message, kb)
}

// TestWithSize exercises the WithSize option: it repeatedly fills the
// cache, runs Cleanup, and checks that overall heap usage does not keep
// growing across iterations.
func TestWithSize(t *testing.T) {
	size := 10_000
	printAlloc("Before")
	cache := NewCache(WithSize[string](size))
	memBefore := getAlloc()
	for iter := 0; iter < 10; iter++ {
		printAlloc("After NewCache")

		// Populate the cache with entries that expire after one second.
		for i := 0; i < size; i++ {
			key := fmt.Sprintf("key-%d", i)
			value := fmt.Sprintf("value-%d", i)

			err := cache.Set(key, value, time.Second)
			assert.NoError(t, err)
		}
		printAlloc("After Set " + strconv.Itoa(size) + " entries")

		cache.Cleanup()
		printAlloc("After Cleanup")

		// Check that the value has been deleted
		// NOTE(review): the keys above use "key-%d" (dash) but this looks
		// up "key_1" (underscore), which never exists — so the assertion
		// passes regardless of what Cleanup did. Confirm whether "key-1"
		// was intended (note that on the first iteration nothing has
		// expired yet, so "key-1" would still be present here).
		_, err := cache.Get("key_1")
		assert.Error(t, err, "Expected the key to be deleted")
		// Let this iteration's entries expire before the next pass.
		time.Sleep(time.Second)
	}
	runtime.GC() // force GC to clean up the cache, make sure it's not leaking
	printAlloc("After")
	memAfter := getAlloc()
	assert.Less(t, memAfter, memBefore*2, "Memory usage should not grow more than twice")
}

// TestThreadSafeCleanup hammers Set and concurrent Cleanup calls so the
// race detector can catch unsynchronized access to the cache's internal
// map. A WaitGroup ensures no Cleanup goroutine outlives the test.
func TestThreadSafeCleanup(t *testing.T) {
	cache := NewCache[string]()
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		key := "key_" + strconv.Itoa(i)
		// Overwrite the same key with growing TTLs while Cleanup runs in parallel.
		for _, ttl := range []time.Duration{10, 20, 30} {
			if err := cache.Set(key, "value", ttl*time.Millisecond); err != nil {
				t.Fatalf("Set(%q) failed: %v", key, err)
			}
			wg.Add(1)
			go func() {
				defer wg.Done()
				cache.Cleanup()
			}()
		}
	}
	// Wait for every Cleanup goroutine before the test returns.
	wg.Wait()
}

func TestMain(m *testing.M) {
// Enable the race detector
m.Run()
Expand Down

0 comments on commit 0cd8884

Please sign in to comment.