path: root/src/runtime/malloc_test.go
author    Michael Anthony Knyszek <mknyszek@google.com>  2020-09-10 21:20:46 +0000
committer Michael Knyszek <mknyszek@google.com>          2020-10-01 19:13:03 +0000
commit    5756b3560141d0c09c4a27d2025f5438f49f59f2 (patch)
tree      5566f0116be723146e00cb3b565b5ad2544f7314 /src/runtime/malloc_test.go
parent    cc2a5cf4b8b0aeaccd3dd439f8d3d68f25eef358 (diff)
runtime: align 12-byte objects to 8 bytes on 32-bit systems
Currently on 32-bit systems 8-byte fields in a struct have an alignment of 4 bytes, which means that atomic instructions may fault. This issue is tracked in #36606.

Our current workaround is to allocate memory and put any such atomically accessed fields at the beginning of the object. This workaround fails when the object is handled by the tiny allocator, because the tiny allocator might not align the object correctly. This case only arises with 12-byte objects, because a type's size is rounded up to its alignment. So if, for example, we have a type like:

    type obj struct {
        a uint64
        b byte
    }

then its fields occupy 9 bytes but its size is rounded up to 12 bytes, because "a" only requires a 4-byte alignment on 32-bit systems. The same argument extends to all objects of size 9-15 bytes.

So, make this workaround work by specifically aligning such objects to 8 bytes on 32-bit systems. This change leaves a TODO to remove the code once #36606 is resolved. It also adds a test which will presumably no longer be necessary (the compiler should enforce the right alignment) once that issue is resolved as well.

Fixes #37262.

Change-Id: I3a34e5b014b3c37ed2e5e75e62d71d8640aa42bc
Reviewed-on: https://go-review.googlesource.com/c/go/+/254057
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Michael Knyszek <mknyszek@google.com>
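As an illustration of the size argument above (not part of the commit itself), a small program can print the relevant numbers. The values in the comments assume a 32-bit target such as GOARCH=386 or GOARCH=arm; on 64-bit targets "a" is 8-byte aligned and the struct grows to 16 bytes instead.

    package main

    import (
        "fmt"
        "unsafe"
    )

    // Same shape as the example in the commit message: a 64-bit field
    // followed by a single byte, for a total of 9 bytes of fields.
    type obj struct {
        a uint64
        b byte
    }

    func main() {
        var x obj
        // On 32-bit systems uint64 only requires 4-byte alignment, so the
        // struct's size rounds up from 9 to 12 bytes.
        fmt.Println(unsafe.Alignof(x.a)) // 4 on 32-bit, 8 on 64-bit
        fmt.Println(unsafe.Sizeof(x))    // 12 on 32-bit, 16 on 64-bit
    }

A 12-byte, pointer-free allocation is small enough for the runtime's tiny allocator (objects under 16 bytes), which is why the misalignment described above can be introduced there at all.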
Diffstat (limited to 'src/runtime/malloc_test.go')
-rw-r--r--  src/runtime/malloc_test.go  57
1 file changed, 57 insertions(+), 0 deletions(-)
diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go
index 5c97f548fd..4ba94d0494 100644
--- a/src/runtime/malloc_test.go
+++ b/src/runtime/malloc_test.go
@@ -12,8 +12,10 @@ import (
"os"
"os/exec"
"reflect"
+ "runtime"
. "runtime"
"strings"
+ "sync/atomic"
"testing"
"time"
"unsafe"
@@ -168,6 +170,61 @@ func TestTinyAlloc(t *testing.T) {
}
}
+var (
+ tinyByteSink *byte
+ tinyUint32Sink *uint32
+ tinyObj12Sink *obj12
+)
+
+type obj12 struct {
+ a uint64
+ b uint32
+}
+
+func TestTinyAllocIssue37262(t *testing.T) {
+ // Try to cause an alignment access fault
+ // by atomically accessing the first 64-bit
+ // value of a tiny-allocated object.
+ // See issue 37262 for details.
+
+ // GC twice, once to reach a stable heap state
+ // and again to make sure we finish the sweep phase.
+ runtime.GC()
+ runtime.GC()
+
+ // Make 1-byte allocations until we get a fresh tiny slot.
+ aligned := false
+ for i := 0; i < 16; i++ {
+ tinyByteSink = new(byte)
+ if uintptr(unsafe.Pointer(tinyByteSink))&0xf == 0xf {
+ aligned = true
+ break
+ }
+ }
+ if !aligned {
+ t.Fatal("unable to get a fresh tiny slot")
+ }
+
+ // Create a 4-byte object so that the current
+ // tiny slot is partially filled.
+ tinyUint32Sink = new(uint32)
+
+ // Create a 12-byte object, which fits into the
+ // tiny slot. If it actually gets placed there,
+ // then the field "a" will be improperly aligned
+ // for atomic access on 32-bit architectures.
+ // This will no longer be the case once issue 36606 is resolved.
+ tinyObj12Sink = new(obj12)
+
+ // Try to atomically access tinyObj12Sink.a.
+ atomic.StoreUint64(&tinyObj12Sink.a, 10)
+
+ // Clear the sinks.
+ tinyByteSink = nil
+ tinyUint32Sink = nil
+ tinyObj12Sink = nil
+}
+
func TestPageCacheLeak(t *testing.T) {
defer GOMAXPROCS(GOMAXPROCS(1))
leaked := PageCachePagesLeaked()
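The allocator-side change described in the commit message is not part of this file's diff (the diffstat is limited to malloc_test.go); it lives in the tiny-allocation path of mallocgc in src/runtime/malloc.go. The following is only a rough sketch of that alignment rule, reconstructed from the commit message rather than taken from this page; "off" is the bump offset within the current tiny block, and alignUp and sys.PtrSize are assumed to be the runtime-internal helpers available at the time.

    // Align the tiny pointer for the required (conservative) alignment.
    if size&7 == 0 {
        off = alignUp(off, 8)
    } else if sys.PtrSize == 4 && size == 12 {
        // Conservatively align 12-byte objects to 8 bytes on 32-bit
        // systems so that an object whose first field is a 64-bit value
        // does not fault on atomic access. See issue 37262.
        // TODO: remove this workaround once issue 36606 is resolved.
        off = alignUp(off, 8)
    } else if size&3 == 0 {
        off = alignUp(off, 4)
    } else if size&1 == 0 {
        off = alignUp(off, 2)
    }

The new test is only interesting on 32-bit targets; on a host that can execute such binaries it can be run with, for example, GOARCH=386 go test -run TestTinyAllocIssue37262 runtime.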