about summary refs log tree commit diff
path: root/src/crypto/internal
diff options
context:
space:
mode:
Diffstat (limited to 'src/crypto/internal')
-rw-r--r--src/crypto/internal/boring/cache.go16
-rw-r--r--src/crypto/internal/boring/cache_test.go47
2 files changed, 50 insertions, 13 deletions
diff --git a/src/crypto/internal/boring/cache.go b/src/crypto/internal/boring/cache.go
index 4cf608368f..476e47706c 100644
--- a/src/crypto/internal/boring/cache.go
+++ b/src/crypto/internal/boring/cache.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build boringcrypto
-
package boring
import (
@@ -39,7 +37,7 @@ type cacheEntry struct {
next *cacheEntry // immutable once linked into table
}
-func registerCache(unsafe.Pointer)
+func registerCache(unsafe.Pointer) // provided by runtime
// Register registers the cache with the runtime,
// so that c.ptable can be cleared at the start of each GC.
@@ -106,7 +104,8 @@ func (c *Cache) Put(k, v unsafe.Pointer) {
//
// 1. We track in noK the start of the section of
// the list that we've confirmed has no entry for k.
- // The next time down the list, we can stop at noK.
+ // The next time down the list, we can stop at noK,
+ // because new entries are inserted at the front of the list.
// This guarantees we never traverse an entry
// multiple times.
//
@@ -127,12 +126,15 @@ func (c *Cache) Put(k, v unsafe.Pointer) {
if add == nil {
add = &cacheEntry{k, v, nil}
}
- if n < 1000 {
- add.next = start
+ add.next = start
+ if n >= 1000 {
+ // If an individual list gets too long, which shouldn't happen,
+ // throw it away to avoid quadratic lookup behavior.
+ add.next = nil
}
if atomic.CompareAndSwapPointer(head, unsafe.Pointer(start), unsafe.Pointer(add)) {
return
}
- noK = e
+ noK = start
}
}
diff --git a/src/crypto/internal/boring/cache_test.go b/src/crypto/internal/boring/cache_test.go
index 050ba457b2..f9ccb74f6f 100644
--- a/src/crypto/internal/boring/cache_test.go
+++ b/src/crypto/internal/boring/cache_test.go
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build boringcrypto
-
package boring
import (
"fmt"
"runtime"
+ "sync"
+ "sync/atomic"
"testing"
"unsafe"
)
@@ -25,11 +25,10 @@ func TestCache(t *testing.T) {
c := new(Cache)
// Create many entries.
- seq := 0
+ seq := uint32(0)
next := func() unsafe.Pointer {
x := new(int)
- *x = seq
- seq++
+ *x = int(atomic.AddUint32(&seq, 1))
return unsafe.Pointer(x)
}
m := make(map[unsafe.Pointer]unsafe.Pointer)
@@ -67,7 +66,7 @@ func TestCache(t *testing.T) {
c.Clear()
for k := range m {
if cv := c.Get(k); cv != nil {
- t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
+ t.Fatalf("after GC, c.Get(%v) = %v, want nil", str(k), str(cv))
}
}
@@ -82,4 +81,40 @@ func TestCache(t *testing.T) {
t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
}
}
+
+ // Check that cache works for concurrent access.
+ // Lists are discarded if they reach 1000 entries,
+ // and there are cacheSize list heads, so we should be
+ // able to do 100 * cacheSize entries with no problem at all.
+ c = new(Cache)
+ var barrier, wg sync.WaitGroup
+ const N = 100
+ barrier.Add(N)
+ wg.Add(N)
+ var lost int32
+ for i := 0; i < N; i++ {
+ go func() {
+ defer wg.Done()
+
+ m := make(map[unsafe.Pointer]unsafe.Pointer)
+ for j := 0; j < cacheSize; j++ {
+ k, v := next(), next()
+ m[k] = v
+ c.Put(k, v)
+ }
+ barrier.Done()
+ barrier.Wait()
+
+ for k, v := range m {
+ if cv := c.Get(k); cv != v {
+ t.Errorf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
+ atomic.AddInt32(&lost, +1)
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ if lost != 0 {
+ t.Errorf("lost %d entries", lost)
+ }
}