1 | | -// Copyright 2016 The Go Authors. All rights reserved. |
| 1 | +// Copyright 2019 The Go Authors. All rights reserved. |
2 | 2 | // Use of this source code is governed by a BSD-style |
3 | 3 | // license that can be found in the LICENSE file. |
4 | 4 |
5 | 5 | // Package syncmap provides a concurrent map implementation. |
6 | | -// It is a prototype for a proposed addition to the sync package |
7 | | -// in the standard library. |
8 | | -// (https://golang.org/issue/18177) |
| 6 | +// This was the prototype for sync.Map which was added to the standard library's |
| 7 | +// sync package in Go 1.9. https://golang.org/pkg/sync/#Map. |
9 | 8 | package syncmap |
10 | | - |
11 | | -import (
12 | | -	"sync"
13 | | -	"sync/atomic"
14 | | -	"unsafe"
15 | | -)
16 | | - |
17 | | -// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. |
18 | | -// It is safe for multiple goroutines to call a Map's methods concurrently. |
19 | | -// |
20 | | -// The zero Map is valid and empty. |
21 | | -// |
22 | | -// A Map must not be copied after first use. |
23 | | -type Map struct {
24 | | -	mu sync.Mutex
25 | | -
26 | | -	// read contains the portion of the map's contents that are safe for
27 | | -	// concurrent access (with or without mu held).
28 | | -	//
29 | | -	// The read field itself is always safe to load, but must only be stored with
30 | | -	// mu held.
31 | | -	//
32 | | -	// Entries stored in read may be updated concurrently without mu, but updating
33 | | -	// a previously-expunged entry requires that the entry be copied to the dirty
34 | | -	// map and unexpunged with mu held.
35 | | -	read atomic.Value // readOnly
36 | | -
37 | | -	// dirty contains the portion of the map's contents that require mu to be
38 | | -	// held. To ensure that the dirty map can be promoted to the read map quickly,
39 | | -	// it also includes all of the non-expunged entries in the read map.
40 | | -	//
41 | | -	// Expunged entries are not stored in the dirty map. An expunged entry in the
42 | | -	// clean map must be unexpunged and added to the dirty map before a new value
43 | | -	// can be stored to it.
44 | | -	//
45 | | -	// If the dirty map is nil, the next write to the map will initialize it by
46 | | -	// making a shallow copy of the clean map, omitting stale entries.
47 | | -	dirty map[interface{}]*entry
48 | | -
49 | | -	// misses counts the number of loads since the read map was last updated that
50 | | -	// needed to lock mu to determine whether the key was present.
51 | | -	//
52 | | -	// Once enough misses have occurred to cover the cost of copying the dirty
53 | | -	// map, the dirty map will be promoted to the read map (in the unamended
54 | | -	// state) and the next store to the map will make a new dirty copy.
55 | | -	misses int
56 | | -}
57 | | - |
58 | | -// readOnly is an immutable struct stored atomically in the Map.read field. |
59 | | -type readOnly struct {
60 | | -	m       map[interface{}]*entry
61 | | -	amended bool // true if the dirty map contains some key not in m.
62 | | -}
63 | | - |
64 | | -// expunged is an arbitrary pointer that marks entries which have been deleted |
65 | | -// from the dirty map. |
66 | | -var expunged = unsafe.Pointer(new(interface{})) |
67 | | - |
68 | | -// An entry is a slot in the map corresponding to a particular key. |
69 | | -type entry struct {
70 | | -	// p points to the interface{} value stored for the entry.
71 | | -	//
72 | | -	// If p == nil, the entry has been deleted and m.dirty == nil.
73 | | -	//
74 | | -	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
75 | | -	// is missing from m.dirty.
76 | | -	//
77 | | -	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
78 | | -	// != nil, in m.dirty[key].
79 | | -	//
80 | | -	// An entry can be deleted by atomic replacement with nil: when m.dirty is
81 | | -	// next created, it will atomically replace nil with expunged and leave
82 | | -	// m.dirty[key] unset.
83 | | -	//
84 | | -	// An entry's associated value can be updated by atomic replacement, provided
85 | | -	// p != expunged. If p == expunged, an entry's associated value can be updated
86 | | -	// only after first setting m.dirty[key] = e so that lookups using the dirty
87 | | -	// map find the entry.
88 | | -	p unsafe.Pointer // *interface{}
89 | | -}
90 | | - |
91 | | -func newEntry(i interface{}) *entry {
92 | | -	return &entry{p: unsafe.Pointer(&i)}
93 | | -}
94 | | - |
95 | | -// Load returns the value stored in the map for a key, or nil if no |
96 | | -// value is present. |
97 | | -// The ok result indicates whether value was found in the map. |
98 | | -func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
99 | | -	read, _ := m.read.Load().(readOnly)
100 | | -	e, ok := read.m[key]
101 | | -	if !ok && read.amended {
102 | | -		m.mu.Lock()
103 | | -		// Avoid reporting a spurious miss if m.dirty got promoted while we were
104 | | -		// blocked on m.mu. (If further loads of the same key will not miss, it's
105 | | -		// not worth copying the dirty map for this key.)
106 | | -		read, _ = m.read.Load().(readOnly)
107 | | -		e, ok = read.m[key]
108 | | -		if !ok && read.amended {
109 | | -			e, ok = m.dirty[key]
110 | | -			// Regardless of whether the entry was present, record a miss: this key
111 | | -			// will take the slow path until the dirty map is promoted to the read
112 | | -			// map.
113 | | -			m.missLocked()
114 | | -		}
115 | | -		m.mu.Unlock()
116 | | -	}
117 | | -	if !ok {
118 | | -		return nil, false
119 | | -	}
120 | | -	return e.load()
121 | | -}
122 | | - |
123 | | -func (e *entry) load() (value interface{}, ok bool) {
124 | | -	p := atomic.LoadPointer(&e.p)
125 | | -	if p == nil || p == expunged {
126 | | -		return nil, false
127 | | -	}
128 | | -	return *(*interface{})(p), true
129 | | -}
130 | | - |
131 | | -// Store sets the value for a key. |
132 | | -func (m *Map) Store(key, value interface{}) {
133 | | -	read, _ := m.read.Load().(readOnly)
134 | | -	if e, ok := read.m[key]; ok && e.tryStore(&value) {
135 | | -		return
136 | | -	}
137 | | -
138 | | -	m.mu.Lock()
139 | | -	read, _ = m.read.Load().(readOnly)
140 | | -	if e, ok := read.m[key]; ok {
141 | | -		if e.unexpungeLocked() {
142 | | -			// The entry was previously expunged, which implies that there is a
143 | | -			// non-nil dirty map and this entry is not in it.
144 | | -			m.dirty[key] = e
145 | | -		}
146 | | -		e.storeLocked(&value)
147 | | -	} else if e, ok := m.dirty[key]; ok {
148 | | -		e.storeLocked(&value)
149 | | -	} else {
150 | | -		if !read.amended {
151 | | -			// We're adding the first new key to the dirty map.
152 | | -			// Make sure it is allocated and mark the read-only map as incomplete.
153 | | -			m.dirtyLocked()
154 | | -			m.read.Store(readOnly{m: read.m, amended: true})
155 | | -		}
156 | | -		m.dirty[key] = newEntry(value)
157 | | -	}
158 | | -	m.mu.Unlock()
159 | | -}
160 | | - |
161 | | -// tryStore stores a value if the entry has not been expunged. |
162 | | -// |
163 | | -// If the entry is expunged, tryStore returns false and leaves the entry |
164 | | -// unchanged. |
165 | | -func (e *entry) tryStore(i *interface{}) bool {
166 | | -	p := atomic.LoadPointer(&e.p)
167 | | -	if p == expunged {
168 | | -		return false
169 | | -	}
170 | | -	for {
171 | | -		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
172 | | -			return true
173 | | -		}
174 | | -		p = atomic.LoadPointer(&e.p)
175 | | -		if p == expunged {
176 | | -			return false
177 | | -		}
178 | | -	}
179 | | -}
180 | | - |
181 | | -// unexpungeLocked ensures that the entry is not marked as expunged. |
182 | | -// |
183 | | -// If the entry was previously expunged, it must be added to the dirty map |
184 | | -// before m.mu is unlocked. |
185 | | -func (e *entry) unexpungeLocked() (wasExpunged bool) {
186 | | -	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
187 | | -}
188 | | - |
189 | | -// storeLocked unconditionally stores a value to the entry. |
190 | | -// |
191 | | -// The entry must be known not to be expunged. |
192 | | -func (e *entry) storeLocked(i *interface{}) {
193 | | -	atomic.StorePointer(&e.p, unsafe.Pointer(i))
194 | | -}
195 | | - |
196 | | -// LoadOrStore returns the existing value for the key if present. |
197 | | -// Otherwise, it stores and returns the given value. |
198 | | -// The loaded result is true if the value was loaded, false if stored. |
199 | | -func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
200 | | -	// Avoid locking if it's a clean hit.
201 | | -	read, _ := m.read.Load().(readOnly)
202 | | -	if e, ok := read.m[key]; ok {
203 | | -		actual, loaded, ok := e.tryLoadOrStore(value)
204 | | -		if ok {
205 | | -			return actual, loaded
206 | | -		}
207 | | -	}
208 | | -
209 | | -	m.mu.Lock()
210 | | -	read, _ = m.read.Load().(readOnly)
211 | | -	if e, ok := read.m[key]; ok {
212 | | -		if e.unexpungeLocked() {
213 | | -			m.dirty[key] = e
214 | | -		}
215 | | -		actual, loaded, _ = e.tryLoadOrStore(value)
216 | | -	} else if e, ok := m.dirty[key]; ok {
217 | | -		actual, loaded, _ = e.tryLoadOrStore(value)
218 | | -		m.missLocked()
219 | | -	} else {
220 | | -		if !read.amended {
221 | | -			// We're adding the first new key to the dirty map.
222 | | -			// Make sure it is allocated and mark the read-only map as incomplete.
223 | | -			m.dirtyLocked()
224 | | -			m.read.Store(readOnly{m: read.m, amended: true})
225 | | -		}
226 | | -		m.dirty[key] = newEntry(value)
227 | | -		actual, loaded = value, false
228 | | -	}
229 | | -	m.mu.Unlock()
230 | | -
231 | | -	return actual, loaded
232 | | -}
233 | | - |
234 | | -// tryLoadOrStore atomically loads or stores a value if the entry is not |
235 | | -// expunged. |
236 | | -// |
237 | | -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and |
238 | | -// returns with ok==false. |
239 | | -func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
240 | | -	p := atomic.LoadPointer(&e.p)
241 | | -	if p == expunged {
242 | | -		return nil, false, false
243 | | -	}
244 | | -	if p != nil {
245 | | -		return *(*interface{})(p), true, true
246 | | -	}
247 | | -
248 | | -	// Copy the interface after the first load to make this method more amenable
249 | | -	// to escape analysis: if we hit the "load" path or the entry is expunged, we
250 | | -	// shouldn't bother heap-allocating.
251 | | -	ic := i
252 | | -	for {
253 | | -		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
254 | | -			return i, false, true
255 | | -		}
256 | | -		p = atomic.LoadPointer(&e.p)
257 | | -		if p == expunged {
258 | | -			return nil, false, false
259 | | -		}
260 | | -		if p != nil {
261 | | -			return *(*interface{})(p), true, true
262 | | -		}
263 | | -	}
264 | | -}
265 | | - |
266 | | -// Delete deletes the value for a key. |
267 | | -func (m *Map) Delete(key interface{}) {
268 | | -	read, _ := m.read.Load().(readOnly)
269 | | -	e, ok := read.m[key]
270 | | -	if !ok && read.amended {
271 | | -		m.mu.Lock()
272 | | -		read, _ = m.read.Load().(readOnly)
273 | | -		e, ok = read.m[key]
274 | | -		if !ok && read.amended {
275 | | -			delete(m.dirty, key)
276 | | -		}
277 | | -		m.mu.Unlock()
278 | | -	}
279 | | -	if ok {
280 | | -		e.delete()
281 | | -	}
282 | | -}
283 | | - |
284 | | -func (e *entry) delete() (hadValue bool) {
285 | | -	for {
286 | | -		p := atomic.LoadPointer(&e.p)
287 | | -		if p == nil || p == expunged {
288 | | -			return false
289 | | -		}
290 | | -		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
291 | | -			return true
292 | | -		}
293 | | -	}
294 | | -}
295 | | - |
296 | | -// Range calls f sequentially for each key and value present in the map. |
297 | | -// If f returns false, range stops the iteration. |
298 | | -// |
299 | | -// Range does not necessarily correspond to any consistent snapshot of the Map's |
300 | | -// contents: no key will be visited more than once, but if the value for any key |
301 | | -// is stored or deleted concurrently, Range may reflect any mapping for that key |
302 | | -// from any point during the Range call. |
303 | | -// |
304 | | -// Range may be O(N) with the number of elements in the map even if f returns |
305 | | -// false after a constant number of calls. |
306 | | -func (m *Map) Range(f func(key, value interface{}) bool) {
307 | | -	// We need to be able to iterate over all of the keys that were already
308 | | -	// present at the start of the call to Range.
309 | | -	// If read.amended is false, then read.m satisfies that property without
310 | | -	// requiring us to hold m.mu for a long time.
311 | | -	read, _ := m.read.Load().(readOnly)
312 | | -	if read.amended {
313 | | -		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
314 | | -		// (assuming the caller does not break out early), so a call to Range
315 | | -		// amortizes an entire copy of the map: we can promote the dirty copy
316 | | -		// immediately!
317 | | -		m.mu.Lock()
318 | | -		read, _ = m.read.Load().(readOnly)
319 | | -		if read.amended {
320 | | -			read = readOnly{m: m.dirty}
321 | | -			m.read.Store(read)
322 | | -			m.dirty = nil
323 | | -			m.misses = 0
324 | | -		}
325 | | -		m.mu.Unlock()
326 | | -	}
327 | | -
328 | | -	for k, e := range read.m {
329 | | -		v, ok := e.load()
330 | | -		if !ok {
331 | | -			continue
332 | | -		}
333 | | -		if !f(k, v) {
334 | | -			break
335 | | -		}
336 | | -	}
337 | | -}
338 | | - |
339 | | -func (m *Map) missLocked() {
340 | | -	m.misses++
341 | | -	if m.misses < len(m.dirty) {
342 | | -		return
343 | | -	}
344 | | -	m.read.Store(readOnly{m: m.dirty})
345 | | -	m.dirty = nil
346 | | -	m.misses = 0
347 | | -}
348 | | - |
349 | | -func (m *Map) dirtyLocked() {
350 | | -	if m.dirty != nil {
351 | | -		return
352 | | -	}
353 | | -
354 | | -	read, _ := m.read.Load().(readOnly)
355 | | -	m.dirty = make(map[interface{}]*entry, len(read.m))
356 | | -	for k, e := range read.m {
357 | | -		if !e.tryExpungeLocked() {
358 | | -			m.dirty[k] = e
359 | | -		}
360 | | -	}
361 | | -}
362 | | - |
363 | | -func (e *entry) tryExpungeLocked() (isExpunged bool) {
364 | | -	p := atomic.LoadPointer(&e.p)
365 | | -	for p == nil {
366 | | -		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
367 | | -			return true
368 | | -		}
369 | | -		p = atomic.LoadPointer(&e.p)
370 | | -	}
371 | | -	return p == expunged
372 | | -}
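
For reference, here is a minimal sketch of the replacement that the new package comment points users to: the standard library's sync.Map, available since Go 1.9. The example below is illustrative only and is not part of this change; the keys and values are made up, but the method set shown (Load, Store, LoadOrStore, Delete, Range) is the same one exported by the prototype deleted above, so callers can typically just switch the import and type name.

```go
// Minimal sketch, not from this commit: using the standard-library sync.Map
// that the updated package comment references. Assumes Go 1.9 or later.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map // the zero Map is valid and empty, as with the prototype

	m.Store("greeting", "hello")
	if v, ok := m.Load("greeting"); ok {
		fmt.Println(v) // hello
	}

	// LoadOrStore returns the existing value if the key is present;
	// otherwise it stores and returns the given value.
	actual, loaded := m.LoadOrStore("greeting", "hi")
	fmt.Println(actual, loaded) // hello true

	m.Range(func(key, value interface{}) bool {
		fmt.Println(key, value)
		return true // continue iterating
	})

	m.Delete("greeting")
}
```

As with the prototype above, sync.Map is tuned for keys that are written once and read many times: loads are served lock-free from the read-only map, and the miss counter amortizes the cost of promoting the dirty map.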