diff --git a/export_test.go b/export_test.go
index af6ae48..b35b34e 100644
--- a/export_test.go
+++ b/export_test.go
@@ -73,9 +73,9 @@ func CollectMapOfStats[K comparable, V any](m *MapOf[K, V]) MapStats {
 	return MapStats{m.stats()}
 }
 
-func NewMapOfPresizedWithHasher[K comparable, V any](
+func NewMapOfWithHasher[K comparable, V any](
 	hasher func(K, uint64) uint64,
-	sizeHint int,
+	options ...func(*MapConfig),
 ) *MapOf[K, V] {
-	return newMapOfPresized[K, V](hasher, sizeHint)
+	return newMapOf[K, V](hasher, options...)
 }
diff --git a/map.go b/map.go
index e86b180..92d73ac 100644
--- a/map.go
+++ b/map.go
@@ -75,6 +75,7 @@ type Map struct {
 	resizeCond  sync.Cond      // used to wake up resize waiters (concurrent modifications)
 	table       unsafe.Pointer // *mapTable
 	minTableLen int
+	growOnly    bool
 }
 
 type mapTable struct {
@@ -118,31 +119,70 @@ type rangeEntry struct {
 	value unsafe.Pointer
 }
 
-// NewMap creates a new Map instance.
-func NewMap() *Map {
-	return NewMapPresized(defaultMinMapTableLen * entriesPerMapBucket)
+// MapConfig defines configurable Map/MapOf options.
+type MapConfig struct {
+	sizeHint int
+	growOnly bool
 }
 
-// NewMapPresized creates a new Map instance with capacity enough to hold
-// sizeHint entries. The capacity is treated as the minimal capacity
-// meaning that the underlying hash table will never shrink to
-// a smaller capacity. If sizeHint is zero or negative, the value
+// WithPresize configures a new Map/MapOf instance with capacity enough
+// to hold sizeHint entries. The capacity is treated as the minimal
+// capacity, meaning that the underlying hash table will never shrink
+// to a smaller capacity. If sizeHint is zero or negative, the value
 // is ignored.
-func NewMapPresized(sizeHint int) *Map {
+func WithPresize(sizeHint int) func(*MapConfig) {
+	return func(c *MapConfig) {
+		c.sizeHint = sizeHint
+	}
+}
+
+// WithGrowOnly configures a new Map/MapOf instance to be grow-only.
+// This means that the underlying hash table grows in capacity when
+// new keys are added, but does not shrink when keys are deleted.
+// The only exception to this rule is the Clear method, which
+// shrinks the hash table back to the initial capacity.
+func WithGrowOnly() func(*MapConfig) {
+	return func(c *MapConfig) {
+		c.growOnly = true
+	}
+}
+
+// NewMap creates a new Map instance configured with the given
+// options.
+func NewMap(options ...func(*MapConfig)) *Map {
+	c := &MapConfig{
+		sizeHint: defaultMinMapTableLen * entriesPerMapBucket,
+	}
+	for _, o := range options {
+		o(c)
+	}
+
 	m := &Map{}
 	m.resizeCond = *sync.NewCond(&m.resizeMu)
 	var table *mapTable
-	if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
+	if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
 		table = newMapTable(defaultMinMapTableLen)
 	} else {
-		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
+		tableLen := nextPowOf2(uint32(c.sizeHint / entriesPerMapBucket))
 		table = newMapTable(int(tableLen))
 	}
 	m.minTableLen = len(table.buckets)
+	m.growOnly = c.growOnly
 	atomic.StorePointer(&m.table, unsafe.Pointer(table))
 	return m
 }
 
+// NewMapPresized creates a new Map instance with capacity enough to hold
+// sizeHint entries. The capacity is treated as the minimal capacity
+// meaning that the underlying hash table will never shrink to
+// a smaller capacity. If sizeHint is zero or negative, the value
+// is ignored.
+//
+// Deprecated: use NewMap in combination with WithPresize.
+func NewMapPresized(sizeHint int) *Map {
+	return NewMap(WithPresize(sizeHint))
+}
+
 func newMapTable(minTableLen int) *mapTable {
 	buckets := make([]bucketPadded, minTableLen)
 	counterLen := minTableLen >> 10
@@ -473,8 +513,9 @@ func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
 	knownTableLen := len(knownTable.buckets)
 	// Fast path for shrink attempts.
 	if hint == mapShrinkHint {
-		shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
-		if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
+		if m.growOnly ||
+			m.minTableLen == knownTableLen ||
+			knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) {
 			return
 		}
 	}
diff --git a/map_test.go b/map_test.go
index 2c5f868..ff75bea 100644
--- a/map_test.go
+++ b/map_test.go
@@ -575,14 +575,17 @@ func assertMapCapacity(t *testing.T, m *Map, expectedCap int) {
 func TestNewMapPresized(t *testing.T) {
 	assertMapCapacity(t, NewMap(), DefaultMinMapTableCap)
 	assertMapCapacity(t, NewMapPresized(1000), 1536)
+	assertMapCapacity(t, NewMap(WithPresize(1000)), 1536)
 	assertMapCapacity(t, NewMapPresized(0), DefaultMinMapTableCap)
+	assertMapCapacity(t, NewMap(WithPresize(0)), DefaultMinMapTableCap)
 	assertMapCapacity(t, NewMapPresized(-1), DefaultMinMapTableCap)
+	assertMapCapacity(t, NewMap(WithPresize(-1)), DefaultMinMapTableCap)
 }
 
 func TestNewMapPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) {
 	const minTableLen = 1024
 	const numEntries = minTableLen * EntriesPerMapBucket
-	m := NewMapPresized(numEntries)
+	m := NewMap(WithPresize(numEntries))
 	for i := 0; i < numEntries; i++ {
 		m.Store(strconv.Itoa(i), i)
 	}
@@ -602,6 +605,38 @@ func TestNewMapPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) {
 	}
 }
 
+func TestNewMapGrowOnly_OnlyShrinksOnClear(t *testing.T) {
+	const minTableLen = 128
+	const numEntries = minTableLen * EntriesPerMapBucket
+	m := NewMap(WithPresize(numEntries), WithGrowOnly())
+
+	stats := CollectMapStats(m)
+	initialTableLen := stats.RootBuckets
+
+	for i := 0; i < 2*numEntries; i++ {
+		m.Store(strconv.Itoa(i), i)
+	}
+	stats = CollectMapStats(m)
+	maxTableLen := stats.RootBuckets
+	if maxTableLen <= minTableLen {
+		t.Fatalf("table did not grow: %d", maxTableLen)
+	}
+
+	for i := 0; i < numEntries; i++ {
+		m.Delete(strconv.Itoa(i))
+	}
+	stats = CollectMapStats(m)
+	if stats.RootBuckets != maxTableLen {
+		t.Fatalf("table length was different from the expected: %d", stats.RootBuckets)
+	}
+
+	m.Clear()
+	stats = CollectMapStats(m)
+	if stats.RootBuckets != initialTableLen {
+		t.Fatalf("table length was different from the initial: %d", stats.RootBuckets)
+	}
+}
+
 func TestMapResize(t *testing.T) {
 	const numEntries = 100_000
 	m := NewMap()
@@ -1217,7 +1252,7 @@ func BenchmarkMapStandard_NoWarmUp(b *testing.B) {
 func BenchmarkMap_WarmUp(b *testing.B) {
 	for _, bc := range benchmarkCases {
 		b.Run(bc.name, func(b *testing.B) {
-			m := NewMapPresized(benchmarkNumEntries)
+			m := NewMap(WithPresize(benchmarkNumEntries))
 			for i := 0; i < benchmarkNumEntries; i++ {
 				m.Store(benchmarkKeyPrefix+strconv.Itoa(i), i)
 			}
diff --git a/mapof.go b/mapof.go
index ea767da..a41213a 100644
--- a/mapof.go
+++ b/mapof.go
@@ -33,6 +33,7 @@ type MapOf[K comparable, V any] struct {
 	table       unsafe.Pointer // *mapOfTable
 	hasher      func(K, uint64) uint64
 	minTableLen int
+	growOnly    bool
 }
 
 type mapOfTable[K comparable, V any] struct {
@@ -65,9 +66,10 @@ type entryOf[K comparable, V any] struct {
 	value V
 }
 
-// NewMapOf creates a new MapOf instance.
-func NewMapOf[K comparable, V any]() *MapOf[K, V] {
-	return NewMapOfPresized[K, V](defaultMinMapTableLen * entriesPerMapBucket)
+// NewMapOf creates a new MapOf instance configured with the given
+// options.
+func NewMapOf[K comparable, V any](options ...func(*MapConfig)) *MapOf[K, V] {
+	return newMapOf[K, V](makeHasher[K](), options...)
 }
 
 // NewMapOfPresized creates a new MapOf instance with capacity enough
@@ -75,25 +77,35 @@ func NewMapOf[K comparable, V any]() *MapOf[K, V] {
 // meaning that the underlying hash table will never shrink to
 // a smaller capacity. If sizeHint is zero or negative, the value
 // is ignored.
+//
+// Deprecated: use NewMapOf in combination with WithPresize.
 func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] {
-	return newMapOfPresized[K, V](makeHasher[K](), sizeHint)
+	return NewMapOf[K, V](WithPresize(sizeHint))
 }
 
-func newMapOfPresized[K comparable, V any](
+func newMapOf[K comparable, V any](
 	hasher func(K, uint64) uint64,
-	sizeHint int,
+	options ...func(*MapConfig),
 ) *MapOf[K, V] {
+	c := &MapConfig{
+		sizeHint: defaultMinMapTableLen * entriesPerMapBucket,
+	}
+	for _, o := range options {
+		o(c)
+	}
+
 	m := &MapOf[K, V]{}
 	m.resizeCond = *sync.NewCond(&m.resizeMu)
 	m.hasher = hasher
 	var table *mapOfTable[K, V]
-	if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
+	if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
 		table = newMapOfTable[K, V](defaultMinMapTableLen)
 	} else {
-		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
+		tableLen := nextPowOf2(uint32(c.sizeHint / entriesPerMapBucket))
 		table = newMapOfTable[K, V](int(tableLen))
 	}
 	m.minTableLen = len(table.buckets)
+	m.growOnly = c.growOnly
 	atomic.StorePointer(&m.table, unsafe.Pointer(table))
 	return m
 }
@@ -423,8 +435,9 @@ func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
 	knownTableLen := len(knownTable.buckets)
 	// Fast path for shrink attempts.
 	if hint == mapShrinkHint {
-		shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
-		if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
+		if m.growOnly ||
+			m.minTableLen == knownTableLen ||
+			knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) {
 			return
 		}
 	}
diff --git a/mapof_test.go b/mapof_test.go
index 00cd2e9..c9912be 100644
--- a/mapof_test.go
+++ b/mapof_test.go
@@ -274,11 +274,11 @@ func TestMapOfStore_StructKeys_StructValues(t *testing.T) {
 
 func TestMapOfStore_HashCodeCollisions(t *testing.T) {
 	const numEntries = 1000
-	m := NewMapOfPresizedWithHasher[int, int](func(i int, _ uint64) uint64 {
+	m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
 		// We intentionally use an awful hash function here to make sure
 		// that the map copes with key collisions.
 		return 42
-	}, numEntries)
+	}, WithPresize(numEntries))
 	for i := 0; i < numEntries; i++ {
 		m.Store(i, i)
 	}
@@ -620,16 +620,21 @@ func assertMapOfCapacity[K comparable, V any](t *testing.T, m *MapOf[K, V], expe
 func TestNewMapOfPresized(t *testing.T) {
 	assertMapOfCapacity(t, NewMapOf[string, string](), DefaultMinMapTableCap)
 	assertMapOfCapacity(t, NewMapOfPresized[string, string](0), DefaultMinMapTableCap)
+	assertMapOfCapacity(t, NewMapOf[string, string](WithPresize(0)), DefaultMinMapTableCap)
 	assertMapOfCapacity(t, NewMapOfPresized[string, string](-100), DefaultMinMapTableCap)
+	assertMapOfCapacity(t, NewMapOf[string, string](WithPresize(-100)), DefaultMinMapTableCap)
 	assertMapOfCapacity(t, NewMapOfPresized[string, string](500), 768)
+	assertMapOfCapacity(t, NewMapOf[string, string](WithPresize(500)), 768)
 	assertMapOfCapacity(t, NewMapOfPresized[int, int](1_000_000), 1_572_864)
+	assertMapOfCapacity(t, NewMapOf[int, int](WithPresize(1_000_000)), 1_572_864)
 	assertMapOfCapacity(t, NewMapOfPresized[point, point](100), 192)
+	assertMapOfCapacity(t, NewMapOf[point, point](WithPresize(100)), 192)
 }
 
 func TestNewMapOfPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) {
 	const minTableLen = 1024
 	const numEntries = minTableLen * EntriesPerMapBucket
-	m := NewMapOfPresized[int, int](numEntries)
+	m := NewMapOf[int, int](WithPresize(numEntries))
 	for i := 0; i < numEntries; i++ {
 		m.Store(i, i)
 	}
@@ -649,6 +654,38 @@ func TestNewMapOfPresized_DoesNotShrinkBelowMinTableLen(t *testing.T) {
 	}
 }
 
+func TestNewMapOfGrowOnly_OnlyShrinksOnClear(t *testing.T) {
+	const minTableLen = 128
+	const numEntries = minTableLen * EntriesPerMapBucket
+	m := NewMapOf[int, int](WithPresize(numEntries), WithGrowOnly())
+
+	stats := CollectMapOfStats(m)
+	initialTableLen := stats.RootBuckets
+
+	for i := 0; i < 2*numEntries; i++ {
+		m.Store(i, i)
+	}
+	stats = CollectMapOfStats(m)
+	maxTableLen := stats.RootBuckets
+	if maxTableLen <= minTableLen {
+		t.Fatalf("table did not grow: %d", maxTableLen)
+	}
+
+	for i := 0; i < numEntries; i++ {
+		m.Delete(i)
+	}
+	stats = CollectMapOfStats(m)
+	if stats.RootBuckets != maxTableLen {
+		t.Fatalf("table length was different from the expected: %d", stats.RootBuckets)
+	}
+
+	m.Clear()
+	stats = CollectMapOfStats(m)
+	if stats.RootBuckets != initialTableLen {
+		t.Fatalf("table length was different from the initial: %d", stats.RootBuckets)
+	}
+}
+
 func TestMapOfResize(t *testing.T) {
 	const numEntries = 100_000
 	m := NewMapOf[string, int]()
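
For context, a minimal usage sketch of the functional options introduced by this patch. The github.com/puzpuzpuz/xsync/v3 import path is an assumption inferred from the file names, not something the diff states; adjust it to the module the patch actually targets.

package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v3" // assumed import path, not stated in the diff
)

func main() {
	// Grow-only Map with a size hint: Delete never shrinks the table,
	// only Clear resets it back to the initial (presized) capacity.
	m := xsync.NewMap(xsync.WithPresize(1000), xsync.WithGrowOnly())
	m.Store("foo", 42)

	// The typed MapOf constructor accepts the same options.
	mo := xsync.NewMapOf[string, int](xsync.WithPresize(1000), xsync.WithGrowOnly())
	mo.Store("bar", 7)

	if v, ok := mo.Load("bar"); ok {
		fmt.Println(v)
	}
}

Calling NewMap() or NewMapOf[K, V]() with no options keeps the previous behavior: the defaults fall back to defaultMinMapTableLen * entriesPerMapBucket with growOnly left false, so the variadic func(*MapConfig) signature stays backward compatible with existing callers.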