Commit aed8afc

implement SharedShardCache

1 parent 9a4a962
2 files changed (maps.go, maps_test.go): +40 -3 lines

maps.go (+36)

@@ -2,6 +2,7 @@ package go_concurrency
 
 import (
 	"sync"
+	"sync/atomic"
 )
 
 func nextPowerOf2(v int) int {
@@ -104,6 +105,41 @@ func (m *ShardCache) Put(key int, value int) {
 	m.maps[key%10][key] = value
 }
 
+const SharedShardMask = 16 - 1
+
+type SharedShardCache struct {
+	maps  [16]atomic.Value
+	locks [16]sync.Mutex
+}
+
+func NewSharedShardCache() *SharedShardCache {
+	m := SharedShardCache{}
+	for i := 0; i < 16; i++ {
+		m.maps[i].Store(make(map[int]int))
+	}
+	return &m
+}
+
+func (m *SharedShardCache) Get(key int) int {
+	m0 := m.maps[key&SharedShardMask]
+	return m0.Load().(map[int]int)[key]
+}
+
+func (m *SharedShardCache) Put(key int, value int) {
+	lock := m.locks[key&SharedShardMask]
+	lock.Lock()
+	m0 := m.maps[key&SharedShardMask].Load().(map[int]int)
+	// make a new map and atomically store it; this could be optimized because if
+	// the key already exists in the map, we can safely update the value in the
+	// main map and just re-store it to enforce the memory fence
+	m1 := make(map[int]int)
+	for k, v := range m0 {
+		m1[k] = v
+	}
+	m1[key] = value
+	m.maps[key&SharedShardMask].Store(m1)
+}
+
 type UnsharedCache map[int]int
 
 func NewUnsharedCache() *UnsharedCache {
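A note on the committed Get and Put above: Get copies the shard's atomic.Value (the sync/atomic documentation says a Value must not be copied after first use), and Put copies the shard's sync.Mutex by value and never unlocks it, so concurrent writers to the same shard are not actually serialized (go vet's copylocks check flags the mutex copy). The following is a corrected sketch of the two methods, not part of this commit, reusing the type and imports from the diff above; it takes both shard slots by pointer and releases the lock.

// Corrected sketch (not part of commit aed8afc): operate on the shard's
// atomic.Value and sync.Mutex through pointers so the shared lock is the
// one actually held, and release it once the new map has been published.
func (m *SharedShardCache) Get(key int) int {
	slot := &m.maps[key&SharedShardMask]
	return slot.Load().(map[int]int)[key]
}

func (m *SharedShardCache) Put(key int, value int) {
	lock := &m.locks[key&SharedShardMask]
	lock.Lock()
	defer lock.Unlock()

	slot := &m.maps[key&SharedShardMask]
	m0 := slot.Load().(map[int]int)
	// copy-on-write: build a fresh map so lock-free readers never observe a
	// map that is being mutated, then publish it atomically
	m1 := make(map[int]int, len(m0)+1)
	for k, v := range m0 {
		m1[k] = v
	}
	m1[key] = value
	slot.Store(m1)
}

With this shape, reads stay lock-free because each Store publishes a fully built map; only writers to the same shard contend on that shard's mutex.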

maps_test.go (+4 -3)

@@ -9,14 +9,15 @@ import (
 	"time"
 )
 
-const NGOS = 2 // number of concurrent go routines for read/load tests
+const NGOS = 8 // number of concurrent go routines for read/load tests
 const Mask = (1024 * 1024) - 1
 
 var um = go_concurrency.NewUnsharedCache()
 var lm = go_concurrency.NewLockCache()
 var sm = go_concurrency.NewSyncCache()
 var cm = go_concurrency.NewChannelCache()
 var sc = go_concurrency.NewShardCache()
+var ssc = go_concurrency.NewSharedShardCache()
 var im = go_concurrency.NewIntMap(256000)   // so there are 4x collisions
 var im2 = go_concurrency.NewIntMap(1000000) // so there are no collisions
 
@@ -80,8 +81,8 @@ func BenchmarkMain(m *testing.B) {
 	m.ResetTimer()
 
 	impls := []go_concurrency.Cache{um, lm, sm, cm, sc, im, im2}
-	names := []string{"unshared", "lock", "sync", "channel", "shard", "intmap", "intmap2"}
-	multi := []bool{false, true, true, true, false, true, true}
+	names := []string{"unshared", "lock", "sync", "channel", "shard", "shareshard", "intmap", "intmap2"}
+	multi := []bool{false, true, true, true, false, true, true, true}
 
 	for i := 0; i < len(impls); i++ {
 		impl := impls[i]
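One apparent oversight in this hunk: names gains a "shareshard" entry and multi gains a matching bool, but impls is left with seven caches, so the benchmark loop (which iterates over len(impls)) never exercises ssc and the labels after "shard" are shifted by one. Presumably impls was meant to be extended as well, roughly:

	impls := []go_concurrency.Cache{um, lm, sm, cm, sc, ssc, im, im2}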
