-rw-r--r--  src/crypto/internal/boring/cache.go       138
-rw-r--r--  src/crypto/internal/boring/cache_test.go    85
-rw-r--r--  src/runtime/mgc.go                            6
-rw-r--r--  src/runtime/mgc_boring.go                    14
4 files changed, 243 insertions, 0 deletions
diff --git a/src/crypto/internal/boring/cache.go b/src/crypto/internal/boring/cache.go
new file mode 100644
index 00000000000..4cf608368f1
--- /dev/null
+++ b/src/crypto/internal/boring/cache.go
@@ -0,0 +1,138 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto
+
+package boring
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// A Cache is a GC-friendly concurrent map from unsafe.Pointer to
+// unsafe.Pointer. It is meant to be used for maintaining shadow
+// BoringCrypto state associated with certain allocated structs, in
+// particular public and private RSA and ECDSA keys.
+//
+// The cache is GC-friendly in the sense that the keys do not
+// indefinitely prevent the garbage collector from collecting them.
+// Instead, at the start of each GC, the cache is cleared entirely. That
+// is, the cache is lossy, and the loss happens at the start of each GC.
+// This means that clients need to be able to cope with cache entries
+// disappearing, but it also means that clients don't need to worry about
+// cache entries keeping the keys from being collected.
+//
+// TODO(rsc): Make Cache generic once consumers can handle that.
+type Cache struct {
+ // ptable is an atomic *[cacheSize]unsafe.Pointer,
+ // where each unsafe.Pointer is an atomic *cacheEntry.
+ // The runtime atomically stores nil to ptable at the start of each GC.
+ ptable unsafe.Pointer
+}
+
+// A cacheEntry is a single entry in the linked list for a given hash table entry.
+type cacheEntry struct {
+ k unsafe.Pointer // immutable once created
+ v unsafe.Pointer // read and written atomically to allow updates
+ next *cacheEntry // immutable once linked into table
+}
+
+// registerCache is implemented in package runtime,
+// which links it here via go:linkname (see mgc_boring.go below).
+func registerCache(unsafe.Pointer)
+
+// Register registers the cache with the runtime,
+// so that c.ptable can be cleared at the start of each GC.
+// Register must be called during package initialization.
+func (c *Cache) Register() {
+ registerCache(unsafe.Pointer(&c.ptable))
+}
+
+// cacheSize is the number of entries in the hash table.
+// The hash is the pointer value mod cacheSize, a prime.
+// Collisions are resolved by maintaining a linked list in each hash slot.
+const cacheSize = 1021
+
+// table returns a pointer to the current cache hash table,
+// coping with the possibility of the GC clearing it out from under us.
+func (c *Cache) table() *[cacheSize]unsafe.Pointer {
+ for {
+ p := atomic.LoadPointer(&c.ptable)
+ if p == nil {
+ p = unsafe.Pointer(new([cacheSize]unsafe.Pointer))
+ if !atomic.CompareAndSwapPointer(&c.ptable, nil, p) {
+ continue
+ }
+ }
+ return (*[cacheSize]unsafe.Pointer)(p)
+ }
+}
+
+// Clear clears the cache.
+// The runtime does this automatically at each garbage collection;
+// this method is exposed only for testing.
+func (c *Cache) Clear() {
+ // The runtime does this at the start of every garbage collection
+ // (itself, not by calling this function).
+ atomic.StorePointer(&c.ptable, nil)
+}
+
+// Get returns the cached value associated with k,
+// which is either the value v corresponding to the most recent call to Put(k, v)
+// or nil if that cache entry has been dropped.
+func (c *Cache) Get(k unsafe.Pointer) unsafe.Pointer {
+ head := &c.table()[uintptr(k)%cacheSize]
+ e := (*cacheEntry)(atomic.LoadPointer(head))
+ for ; e != nil; e = e.next {
+ if e.k == k {
+ return atomic.LoadPointer(&e.v)
+ }
+ }
+ return nil
+}
+
+// Put sets the cached value associated with k to v.
+func (c *Cache) Put(k, v unsafe.Pointer) {
+ head := &c.table()[uintptr(k)%cacheSize]
+
+ // Strategy is to walk the linked list at head,
+ // same as in Get, to look for existing entry.
+ // If we find one, we update v atomically in place.
+ // If not, then we race to replace the start = *head
+ // we observed with a new k, v entry.
+ // If we win that race, we're done.
+ // Otherwise, we try the whole thing again,
+ // with two optimizations:
+ //
+ // 1. We track in noK the start of the section of
+ // the list that we've confirmed has no entry for k.
+ // The next time down the list, we can stop at noK,
+ // because new entries are only ever added at the
+ // front of the list. This guarantees we never
+ // traverse an entry multiple times.
+ //
+ // 2. We only allocate the entry to be added once,
+ // saving it in add for the next attempt.
+ var add, noK *cacheEntry
+ n := 0
+ for {
+ e := (*cacheEntry)(atomic.LoadPointer(head))
+ start := e
+ for ; e != nil && e != noK; e = e.next {
+ if e.k == k {
+ atomic.StorePointer(&e.v, v)
+ return
+ }
+ n++
+ }
+ if add == nil {
+ add = &cacheEntry{k, v, nil}
+ }
+ add.next = start
+ if n >= 1000 {
+ // If an individual list gets too long, throw it
+ // away to avoid quadratic lookup behavior.
+ add.next = nil
+ }
+ if atomic.CompareAndSwapPointer(head, unsafe.Pointer(start), unsafe.Pointer(add)) {
+ return
+ }
+ noK = start // the list from start onward is known to have no entry for k
+ }
+}
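
The contract described in the type comment (entries vanish at every GC, registration must happen at package init) implies a particular consumer pattern. Here is a minimal sketch of that pattern; keyCache, newShadowKey, and shadowKey are hypothetical names for illustration, not part of this change, which only names RSA and ECDSA keys as the intended consumers.

	//go:build boringcrypto

	package boring

	import "unsafe"

	// keyCache is a hypothetical package-level cache. Per the Cache
	// contract, it must be registered during package initialization
	// so the runtime can clear it at the start of each GC.
	var keyCache Cache

	func init() {
		keyCache.Register()
	}

	// newShadowKey stands in for whatever builds the BoringCrypto
	// shadow state for a key; it is not part of this change.
	func newShadowKey(k unsafe.Pointer) unsafe.Pointer {
		return unsafe.Pointer(new(int))
	}

	// shadowKey returns the cached shadow state for k, rebuilding it
	// on a miss. Misses are routine: the runtime drops every entry at
	// the start of each GC, so the cache must be treated as strictly
	// lossy.
	func shadowKey(k unsafe.Pointer) unsafe.Pointer {
		if v := keyCache.Get(k); v != nil {
			return v
		}
		v := newShadowKey(k)
		keyCache.Put(k, v) // concurrent callers may both build; one Put wins
		return v
	}

Two goroutines that miss at the same time will both build state and race in Put; the design lets one writer win rather than locking, so whatever newShadowKey builds must be safe to discard.
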
diff --git a/src/crypto/internal/boring/cache_test.go b/src/crypto/internal/boring/cache_test.go
new file mode 100644
index 00000000000..050ba457b29
--- /dev/null
+++ b/src/crypto/internal/boring/cache_test.go
@@ -0,0 +1,85 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto
+
+package boring
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+ "unsafe"
+)
+
+var registeredCache Cache
+
+func init() {
+ registeredCache.Register()
+}
+
+func TestCache(t *testing.T) {
+ // Use unregistered cache for functionality tests,
+ // to keep the runtime from clearing it behind our backs.
+ c := new(Cache)
+
+ // Create many entries.
+ seq := 0
+ next := func() unsafe.Pointer {
+ x := new(int)
+ *x = seq
+ seq++
+ return unsafe.Pointer(x)
+ }
+ m := make(map[unsafe.Pointer]unsafe.Pointer)
+ for i := 0; i < 10000; i++ {
+ k := next()
+ v := next()
+ m[k] = v
+ c.Put(k, v)
+ }
+
+ // Overwrite a random 20% of those.
+ n := 0
+ for k := range m {
+ v := next()
+ m[k] = v
+ c.Put(k, v)
+ if n++; n >= 2000 {
+ break
+ }
+ }
+
+ // Check results.
+ str := func(p unsafe.Pointer) string {
+ if p == nil {
+ return "nil"
+ }
+ return fmt.Sprint(*(*int)(p))
+ }
+ for k, v := range m {
+ if cv := c.Get(k); cv != v {
+ t.Fatalf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
+ }
+ }
+
+ c.Clear()
+ for k := range m {
+ if cv := c.Get(k); cv != nil {
+ t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
+ }
+ }
+
+ // Check that registered cache is cleared at GC.
+ c = &registeredCache
+ for k, v := range m {
+ c.Put(k, v)
+ }
+ runtime.GC()
+ for k := range m {
+ if cv := c.Get(k); cv != nil {
+ t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
+ }
+ }
+}
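
TestCache runs on a single goroutine, while the type's whole point is lock-free concurrent use. A stress test along these lines, sketched here rather than included in this change, would let go test -race check the atomic-pointer discipline in Get and Put:

	//go:build boringcrypto

	package boring

	import (
		"sync"
		"testing"
		"unsafe"
	)

	// TestCacheConcurrency hammers one unregistered Cache from several
	// goroutines. Since nothing clears the cache during the test, every
	// goroutine must be able to read back its own writes.
	func TestCacheConcurrency(t *testing.T) {
		c := new(Cache)
		var wg sync.WaitGroup
		for g := 0; g < 8; g++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					k := unsafe.Pointer(new(int))
					v := unsafe.Pointer(new(int))
					c.Put(k, v)
					if got := c.Get(k); got != v {
						t.Errorf("c.Get = %p, want %p", got, v)
					}
				}
			}()
		}
		wg.Wait()
	}
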
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 604d0db24a2..b2558c8bd3c 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -1536,6 +1536,7 @@ func gcResetMarkState() {
 // Hooks for other packages
 
 var poolcleanup func()
+var boringCaches []unsafe.Pointer
 
 //go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
 func sync_runtime_registerPoolCleanup(f func()) {
@@ -1548,6 +1549,11 @@ func clearpools() {
 		poolcleanup()
 	}
 
+	// clear boringcrypto caches
+	for _, p := range boringCaches {
+		atomicstorep(p, nil)
+	}
+
 	// Clear central sudog cache.
 	// Leave per-P caches alone, they have strictly bounded size.
 	// Disconnect cached list before dropping it on the floor,
diff --git a/src/runtime/mgc_boring.go b/src/runtime/mgc_boring.go
new file mode 100644
index 00000000000..149ba51dd7e
--- /dev/null
+++ b/src/runtime/mgc_boring.go
@@ -0,0 +1,14 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto
+
+package runtime
+
+import "unsafe"
+
+//go:linkname boring_registerCache crypto/internal/boring.registerCache
+func boring_registerCache(p unsafe.Pointer) {
+ boringCaches = append(boringCaches, p)
+}
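
This is the runtime half of the bodyless registerCache declaration in cache.go: //go:linkname lets the internal crypto package call into the runtime without either package importing the other. The resulting mechanism is small enough to simulate in isolation. The sketch below uses hypothetical names (registry, clearRegistered) and ordinary calls instead of linkname, only to illustrate why atomically storing nil into each registered ptable address empties every cache at once:

	package main

	import (
		"fmt"
		"sync/atomic"
		"unsafe"
	)

	// registry plays the role of runtime.boringCaches: each element is
	// the address of some cache's atomically accessed table pointer.
	var registry []*unsafe.Pointer

	func register(p *unsafe.Pointer) { registry = append(registry, p) }

	// clearRegistered plays the role of the clearpools loop: storing
	// nil atomically is all it takes to empty every registered cache.
	func clearRegistered() {
		for _, p := range registry {
			atomic.StorePointer(p, nil)
		}
	}

	func main() {
		var ptable unsafe.Pointer // a cache's table pointer
		register(&ptable)

		atomic.StorePointer(&ptable, unsafe.Pointer(new([4]unsafe.Pointer)))
		fmt.Println("before clear, table nil?", atomic.LoadPointer(&ptable) == nil)

		clearRegistered() // what the GC does at the start of each cycle
		fmt.Println("after clear, table nil?", atomic.LoadPointer(&ptable) == nil)
	}
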