perf: Clear cacheKV RAM if it's too large (#15767)
Co-authored-by: Marko <[email protected]>
ValarDragon and tac0turtle authored May 11, 2023
1 parent e6808cf commit d39f645
Showing 2 changed files with 40 additions and 21 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -106,6 +106,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (baseapp) [#15023](https://github.com/cosmos/cosmos-sdk/pull/15023) & [#15213](https://github.com/cosmos/cosmos-sdk/pull/15213) Add `MessageRouter` interface to baseapp and pass it to authz, gov and groups instead of concrete type.
* (simtestutil) [#15305](https://github.com/cosmos/cosmos-sdk/pull/15305) Add `AppStateFnWithExtendedCb` with callback function to extend rawState.
* (x/consensus) [#15553](https://github.com/cosmos/cosmos-sdk/pull/15553) Migrate consensus module to use collections
+* (store/cachekv) [#15767](https://github.com/cosmos/cosmos-sdk/pull/15767) Reduce peak RAM usage during and after InitGenesis
* (x/bank) [#15764](https://github.com/cosmos/cosmos-sdk/pull/15764) Speedup x/bank InitGenesis
* (x/auth) [#15867](https://github.com/cosmos/cosmos-sdk/pull/15867) Support better logging for signature verification failure.
* (types/query) [#16041](https://github.com/cosmos/cosmos-sdk/pull/16041) change the pagination max limit to a variable so it can be modified by application devs
60 changes: 39 additions & 21 deletions store/cachekv/store.go
@@ -93,6 +93,28 @@ func (store *Store) Delete(key []byte) {
	store.setCacheValue(key, nil, true)
}

+func (store *Store) resetCaches() {
+	if len(store.cache) > 100_000 {
+		// Cache is too large. We likely did something linear time
+		// (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated.
+		// TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation.
+		// 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem.
+		store.cache = make(map[string]*cValue)
+		store.unsortedCache = make(map[string]struct{})
+	} else {
+		// Clear the cache using the map clearing idiom
+		// and not allocating fresh objects.
+		// Please see https://bencher.orijtech.com/perfclinic/mapclearing/
+		for key := range store.cache {
+			delete(store.cache, key)
+		}
+		for key := range store.unsortedCache {
+			delete(store.unsortedCache, key)
+		}
+	}
+	store.sortedCache = internal.NewBTree()
+}

// Implements Cachetypes.KVStore.
func (store *Store) Write() {
	store.mtx.Lock()
@@ -103,44 +125,40 @@ func (store *Store) Write() {
		return
	}

+	type cEntry struct {
+		key string
+		val *cValue
+	}

	// We need a copy of all of the keys.
-	// Not the best, but probably not a bottleneck depending.
-	keys := make([]string, 0, len(store.cache))
+	// Not the best. To reduce RAM pressure, we copy the values as well
+	// and clear out the old caches right after the copy.
+	sortedCache := make([]cEntry, 0, len(store.cache))

	for key, dbValue := range store.cache {
		if dbValue.dirty {
-			keys = append(keys, key)
+			sortedCache = append(sortedCache, cEntry{key, dbValue})
		}
	}

-	sort.Strings(keys)
+	store.resetCaches()
+	sort.Slice(sortedCache, func(i, j int) bool {
+		return sortedCache[i].key < sortedCache[j].key
+	})

	// TODO: Consider allowing usage of Batch, which would allow the write to
	// at least happen atomically.
-	for _, key := range keys {
+	for _, obj := range sortedCache {
		// We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot
		// be sure if the underlying store might do a save with the byteslice or
		// not. Once we get confirmation that .Delete is guaranteed not to
		// save the byteslice, then we can assume only a read-only copy is sufficient.
-		cacheValue := store.cache[key]
-		if cacheValue.value != nil {
+		if obj.val.value != nil {
			// It already exists in the parent, hence update it.
-			store.parent.Set([]byte(key), cacheValue.value)
+			store.parent.Set([]byte(obj.key), obj.val.value)
		} else {
-			store.parent.Delete([]byte(key))
+			store.parent.Delete([]byte(obj.key))
		}
	}

-	// Clear the cache using the map clearing idiom
-	// and not allocating fresh objects.
-	// Please see https://bencher.orijtech.com/perfclinic/mapclearing/
-	for key := range store.cache {
-		delete(store.cache, key)
-	}
-	for key := range store.unsortedCache {
-		delete(store.unsortedCache, key)
-	}
-	store.sortedCache = internal.NewBTree()
}

// CacheWrap implements CacheWrapper.
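For context on the resetCaches change above: clearing a Go map with the delete-in-a-loop idiom keeps the map's internal buckets allocated, so a cache that grows to a huge size during a linear workload such as InitGenesis can keep holding that memory even after it is emptied. Allocating fresh maps above a size threshold lets the garbage collector reclaim the old buckets. The sketch below illustrates the trade-off; the function name, the key/value types, and the threshold are illustrative only, not the SDK's API.

package main

import (
	"fmt"
	"strconv"
)

// clearOrReplace mirrors the strategy of resetCaches in the diff above.
// Below the threshold the map is reused via the delete-in-a-loop idiom,
// which the Go compiler recognizes and compiles to a fast map clear; above
// it a fresh map is allocated, because Go maps never shrink once they have
// grown, so dropping the old map is the only way to release its buckets.
func clearOrReplace(m map[string][]byte, threshold int) map[string][]byte {
	if len(m) > threshold {
		return make(map[string][]byte) // let the oversized buckets be garbage collected
	}
	for k := range m {
		delete(m, k) // keep the existing allocation for the common, small case
	}
	return m
}

func main() {
	cache := make(map[string][]byte)
	for i := 0; i < 200_000; i++ {
		cache[strconv.Itoa(i)] = make([]byte, 32)
	}
	cache = clearOrReplace(cache, 100_000)
	fmt.Println(len(cache)) // 0, and the 200k-entry buckets are now collectible
}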
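The reworked Write above also changes when memory is released: the dirty entries are copied into a flat slice, the cache maps are reset immediately via resetCaches, and only then is the slice sorted and flushed to the parent store, instead of keeping the full cache maps alive until the end of the write-back as the old code did. A minimal sketch of that copy-reset-flush ordering follows; the entry type, the map types, and the parentSet callback are hypothetical stand-ins for the SDK's cValue and parent KVStore, not its actual API.

package main

import (
	"fmt"
	"sort"
)

type entry struct {
	key string
	val []byte
}

// flushAndReset copies the dirty entries out of the cache, resets the cache
// before the write-back rather than after it, then writes the entries to the
// parent in sorted key order. parentSet stands in for the parent store's Set
// and is purely illustrative.
func flushAndReset(cache map[string][]byte, parentSet func(key string, val []byte)) map[string][]byte {
	entries := make([]entry, 0, len(cache))
	for k, v := range cache {
		entries = append(entries, entry{key: k, val: v})
	}
	// Reset first: the old map can be collected while the flush runs.
	cache = make(map[string][]byte)
	sort.Slice(entries, func(i, j int) bool { return entries[i].key < entries[j].key })
	for _, e := range entries {
		parentSet(e.key, e.val)
	}
	return cache
}

func main() {
	cache := map[string][]byte{"b": []byte("2"), "a": []byte("1")}
	cache = flushAndReset(cache, func(key string, val []byte) {
		fmt.Println(key, string(val)) // prints "a 1" then "b 2"
	})
	fmt.Println(len(cache)) // 0
}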
