path: root/src/crypto/internal/boring/cache.go
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build boringcrypto

package boring

import (
	"sync/atomic"
	"unsafe"
)

// A Cache is a GC-friendly concurrent map from unsafe.Pointer to
// unsafe.Pointer. It is meant to be used for maintaining shadow
// BoringCrypto state associated with certain allocated structs, in
// particular public and private RSA and ECDSA keys.
//
// The cache is GC-friendly in the sense that the keys do not
// indefinitely prevent the garbage collector from collecting them.
// Instead, at the start of each GC, the cache is cleared entirely. That
// is, the cache is lossy, and the loss happens at the start of each GC.
// This means that clients need to be able to cope with cache entries
// disappearing, but it also means that clients don't need to worry about
// cache entries keeping the keys from being collected.
//
// TODO(rsc): Make Cache generic once consumers can handle that.
type Cache struct {
	// ptable is an atomic *[cacheSize]unsafe.Pointer,
	// where each unsafe.Pointer is an atomic *cacheEntry.
	// The runtime atomically stores nil to ptable at the start of each GC.
	ptable unsafe.Pointer
}

// A cacheEntry is a single entry in the linked list for a given hash table entry.
type cacheEntry struct {
	k    unsafe.Pointer // immutable once created
	v    unsafe.Pointer // read and written atomically to allow updates
	next *cacheEntry    // immutable once linked into table
}

// registerCache is provided by package runtime.
//
//go:linkname registerCache
func registerCache(unsafe.Pointer)

// Register registers the cache with the runtime,
// so that c.ptable can be cleared at the start of each GC.
// Register must be called during package initialization.
func (c *Cache) Register() {
	registerCache(unsafe.Pointer(&c.ptable))
}
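
// exampleShadow is an illustrative sketch and not part of the original file:
// it shows the intended client pattern of treating a Get miss as "recompute
// and Put", since any entry may be dropped at the start of the next GC.
// The build parameter stands in for the (hypothetical) expensive construction
// of the shadow BoringCrypto state; in real use c is a package-level Cache
// that has had Register called on it during package initialization.
func exampleShadow(c *Cache, k unsafe.Pointer, build func(unsafe.Pointer) unsafe.Pointer) unsafe.Pointer {
	if v := c.Get(k); v != nil {
		return v // hit: v is from the most recent Put(k, v)
	}
	v := build(k) // miss, possibly because a GC cleared the cache: rebuild
	c.Put(k, v)   // best effort; the new entry may itself be dropped later
	return v
}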

// cacheSize is the number of entries in the hash table.
// The hash is the pointer value mod cacheSize, a prime.
// Collisions are resolved by maintaining a linked list in each hash slot.
const cacheSize = 1021

// table returns a pointer to the current cache hash table,
// coping with the possibility of the GC clearing it out from under us.
func (c *Cache) table() *[cacheSize]unsafe.Pointer {
	for {
		p := atomic.LoadPointer(&c.ptable)
		if p == nil {
			p = unsafe.Pointer(new([cacheSize]unsafe.Pointer))
			if !atomic.CompareAndSwapPointer(&c.ptable, nil, p) {
				continue
			}
		}
		return (*[cacheSize]unsafe.Pointer)(p)
	}
}

// Clear clears the cache.
// The runtime does this automatically at each garbage collection;
// this method is exposed only for testing.
func (c *Cache) Clear() {
	// The runtime does this at the start of every garbage collection
	// (itself, not by calling this function).
	atomic.StorePointer(&c.ptable, nil)
}
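
// exampleLossiness is an illustrative sketch and not part of the original
// file: it demonstrates the lossy behavior described above by using Clear to
// simulate what the runtime does at the start of every garbage collection.
func exampleLossiness() {
	var c Cache
	k, v := new(int32), new(int32)

	c.Put(unsafe.Pointer(k), unsafe.Pointer(v))
	_ = c.Get(unsafe.Pointer(k)) // returns unsafe.Pointer(v)

	c.Clear()                    // stands in for the start-of-GC clearing
	_ = c.Get(unsafe.Pointer(k)) // now returns nil: the entry was dropped
}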

// Get returns the cached value associated with k,
// which is either the value v corresponding to the most recent call to Put(k, v)
// or nil if that cache entry has been dropped.
func (c *Cache) Get(k unsafe.Pointer) unsafe.Pointer {
	head := &c.table()[uintptr(k)%cacheSize]
	e := (*cacheEntry)(atomic.LoadPointer(head))
	for ; e != nil; e = e.next {
		if e.k == k {
			return atomic.LoadPointer(&e.v)
		}
	}
	return nil
}

// Put sets the cached value associated with k to v.
func (c *Cache) Put(k, v unsafe.Pointer) {
	head := &c.table()[uintptr(k)%cacheSize]

	// Strategy is to walk the linked list at head,
	// same as in Get, to look for existing entry.
	// If we find one, we update v atomically in place.
	// If not, then we race to replace the start = *head
	// we observed with a new k, v entry.
	// If we win that race, we're done.
	// Otherwise, we try the whole thing again,
	// with two optimizations:
	//
	//  1. We track in noK the start of the section of
	//     the list that we've confirmed has no entry for k.
	//     The next time down the list, we can stop at noK.
	//     This guarantees we never traverse an entry
	//     multiple times.
	//
	//  2. We only allocate the entry to be added once,
	//     saving it in add for the next attempt.
	var add, noK *cacheEntry
	n := 0
	for {
		e := (*cacheEntry)(atomic.LoadPointer(head))
		start := e
		for ; e != nil && e != noK; e = e.next {
			if e.k == k {
				atomic.StorePointer(&e.v, v)
				return
			}
			n++
		}
		if add == nil {
			add = &cacheEntry{k, v, nil}
		}
		add.next = start
		if n >= 1000 {
			// If an individual list gets too long, which shouldn't happen,
			// throw it away to avoid quadratic lookup behavior.
			add.next = nil
		}
		if atomic.CompareAndSwapPointer(head, unsafe.Pointer(start), unsafe.Pointer(add)) {
			return
		}
		noK = start
	}
}
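
// exampleUpdate is an illustrative sketch and not part of the original file:
// it shows that a second Put for an existing key updates the entry's value in
// place (the atomic store in the loop above), so Get always observes the
// value from the most recent Put.
func exampleUpdate() {
	var c Cache
	k := new(int32)
	v1, v2 := new(int32), new(int32)

	c.Put(unsafe.Pointer(k), unsafe.Pointer(v1))
	c.Put(unsafe.Pointer(k), unsafe.Pointer(v2)) // updates the existing entry atomically
	_ = c.Get(unsafe.Pointer(k))                 // returns unsafe.Pointer(v2)
}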