author    Russ Cox <rsc@golang.org>    2022-04-27 09:02:53 -0400
committer Russ Cox <rsc@golang.org>    2022-04-29 14:23:32 +0000
commit    e845f572ec6163fd3bad0267b5bb4f24d369bd93 (patch)
tree      9156df472b1692b5a4ff180bdaf02e98b5f40dbb /src/crypto/internal/boring/cache_test.go
parent    a840bf871e005d948ba6442948997eb3ef2e3c7f (diff)
download  go-e845f572ec6163fd3bad0267b5bb4f24d369bd93.tar.gz
          go-e845f572ec6163fd3bad0267b5bb4f24d369bd93.zip
[dev.boringcrypto] crypto/ecdsa, crypto/rsa: use boring.Cache
In the original BoringCrypto port, ecdsa and rsa's public and private keys
added a 'boring unsafe.Pointer' field to cache the BoringCrypto form of the
key. This led to problems with code that "knew" the layout of those structs
and in particular that they had no unexported fields. In response, as an
awful kludge, I changed the compiler to pretend that field did not exist
when laying out reflect data. Because we want to merge BoringCrypto in the
main tree, we need a different solution. Using boring.Cache is that solution.

For #51940.

Change-Id: Ideb2b40b599a1dc223082eda35a5ea9abcc01e30
Reviewed-on: https://go-review.googlesource.com/c/go/+/395883
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Roland Shoemaker <roland@golang.org>
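The pattern this change adopts: instead of an unexported field inside the ecdsa/rsa key structs, the BoringCrypto form of a key is cached in a package-level boring.Cache keyed by the address of the Go key. A minimal sketch of that pattern, using the Cache API exercised by the test below (Get, Put, Register); the names pubCache and newBoringKey are illustrative, not taken from this change, and crypto/internal/boring is internal to the standard library, so code like this lives inside crypto/ecdsa and crypto/rsa themselves:

package ecdsa // sketch only; the real code is in crypto/ecdsa's boring-only files

import (
	"crypto/internal/boring"
	"unsafe"
)

var pubCache boring.Cache

func init() {
	// Register the cache so the runtime empties it at each GC;
	// a dropped entry is simply recomputed on the next use.
	pubCache.Register()
}

// boringPublicKey returns the cached BoringCrypto form of pub,
// keyed by pub's address rather than an unexported struct field.
func boringPublicKey(pub *PublicKey) (unsafe.Pointer, error) {
	if b := pubCache.Get(unsafe.Pointer(pub)); b != nil {
		return b, nil
	}
	b, err := newBoringKey(pub) // hypothetical conversion to the C key form
	if err != nil {
		return nil, err
	}
	pubCache.Put(unsafe.Pointer(pub), b)
	return b, nil
}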
Diffstat (limited to 'src/crypto/internal/boring/cache_test.go')
-rw-r--r--  src/crypto/internal/boring/cache_test.go | 47
1 file changed, 41 insertions, 6 deletions
diff --git a/src/crypto/internal/boring/cache_test.go b/src/crypto/internal/boring/cache_test.go
index 050ba457b2..f9ccb74f6f 100644
--- a/src/crypto/internal/boring/cache_test.go
+++ b/src/crypto/internal/boring/cache_test.go
@@ -2,13 +2,13 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build boringcrypto
-
 package boring
 
 import (
 	"fmt"
 	"runtime"
+	"sync"
+	"sync/atomic"
 	"testing"
 	"unsafe"
 )
@@ -25,11 +25,10 @@ func TestCache(t *testing.T) {
 	c := new(Cache)
 
 	// Create many entries.
-	seq := 0
+	seq := uint32(0)
 	next := func() unsafe.Pointer {
 		x := new(int)
-		*x = seq
-		seq++
+		*x = int(atomic.AddUint32(&seq, 1))
 		return unsafe.Pointer(x)
 	}
 	m := make(map[unsafe.Pointer]unsafe.Pointer)
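This hunk readies next for the concurrent-access test added at the end of the file: the unsynchronized seq++ would be a data race once multiple goroutines call next, and could hand out duplicate values. A standalone sketch of the same idiom (a hypothetical example, clean under go run -race):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var seq uint32
	// next returns a distinct value on every call, even when called
	// from many goroutines at once; with seq++ instead, the race
	// detector would flag this program.
	next := func() int {
		return int(atomic.AddUint32(&seq, 1))
	}

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = next()
		}()
	}
	wg.Wait()
	fmt.Println("issued", atomic.LoadUint32(&seq), "values") // always 100
}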
@@ -67,7 +66,7 @@ func TestCache(t *testing.T) {
 	c.Clear()
 	for k := range m {
 		if cv := c.Get(k); cv != nil {
-			t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
+			t.Fatalf("after GC, c.Get(%v) = %v, want nil", str(k), str(cv))
 		}
 	}
 
@@ -82,4 +81,40 @@ func TestCache(t *testing.T) {
 			t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
 		}
 	}
+
+	// Check that cache works for concurrent access.
+	// Lists are discarded if they reach 1000 entries,
+	// and there are cacheSize list heads, so we should be
+	// able to do 100 * cacheSize entries with no problem at all.
+	c = new(Cache)
+	var barrier, wg sync.WaitGroup
+	const N = 100
+	barrier.Add(N)
+	wg.Add(N)
+	var lost int32
+	for i := 0; i < N; i++ {
+		go func() {
+			defer wg.Done()
+
+			m := make(map[unsafe.Pointer]unsafe.Pointer)
+			for j := 0; j < cacheSize; j++ {
+				k, v := next(), next()
+				m[k] = v
+				c.Put(k, v)
+			}
+			barrier.Done()
+			barrier.Wait()
+
+			for k, v := range m {
+				if cv := c.Get(k); cv != v {
+					t.Errorf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
+					atomic.AddInt32(&lost, +1)
+				}
+			}
+		}()
+	}
+	wg.Wait()
+	if lost != 0 {
+		t.Errorf("lost %d entries", lost)
+	}
 }
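A note on the new test's synchronization: barrier is a sync.WaitGroup used as a start barrier, separate from wg, which joins the goroutines. Each goroutine calls barrier.Done() after finishing its Puts and then blocks in barrier.Wait(), so no goroutine begins its Gets until all N goroutines have inserted their cacheSize entries; only then is the lost-entry count meaningful. The two-phase idiom in isolation (a hypothetical standalone example):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const N = 100
	var barrier, wg sync.WaitGroup
	barrier.Add(N)
	wg.Add(N)

	results := make([]int, N)
	for i := 0; i < N; i++ {
		go func(i int) {
			defer wg.Done()

			results[i] = i * i // phase 1: each goroutine writes its own slot

			barrier.Done() // announce phase 1 complete
			barrier.Wait() // block until all N goroutines reach this point

			// Phase 2: every slot is now written; the WaitGroup
			// guarantees those writes happen before Wait returns.
			sum := 0
			for _, v := range results {
				sum += v
			}
			_ = sum
		}(i)
	}
	wg.Wait()
	fmt.Println("all", N, "goroutines finished both phases")
}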