author    Filippo Valsorda <filippo@golang.org>  2020-10-16 22:30:09 +0200
committer Filippo Valsorda <filippo@golang.org>  2020-10-16 22:30:09 +0200
commit    ed9dc25d693c02aeeb8ed084b28b6d7f9489dc5a (patch)
tree      607e9be8b09af4947c54461f5f682913871493fb
parent    dbc5602d18397d1841cb7b2e8974d472c15dee83 (diff)
parent    1984ee00048b63eacd2155cd6d74a2d13e998272 (diff)
[dev.boringcrypto.go1.15] all: merge go1.15.3 into dev.boringcrypto.go1.15
Change-Id: Ie6adbf4564f8a025ff7fa21f31c0cd19ad8bc232
-rw-r--r--  misc/cgo/test/testdata/issue41761.go | 20
-rw-r--r--  misc/cgo/test/testdata/issue41761a/a.go | 14
-rw-r--r--  src/bytes/bytes.go | 50
-rw-r--r--  src/cmd/addr2line/addr2line_test.go | 24
-rw-r--r--  src/cmd/cgo/gcc.go | 15
-rw-r--r--  src/cmd/cgo/main.go | 19
-rw-r--r--  src/cmd/cgo/out.go | 3
-rw-r--r--  src/cmd/compile/internal/gc/esc.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/escape.go | 15
-rw-r--r--  src/cmd/compile/internal/gc/gsubr.go | 3
-rw-r--r--  src/cmd/compile/internal/gc/iexport.go | 6
-rw-r--r--  src/cmd/compile/internal/gc/iimport.go | 5
-rw-r--r--  src/cmd/compile/internal/gc/order.go | 6
-rw-r--r--  src/cmd/compile/internal/gc/pgen.go | 6
-rw-r--r--  src/cmd/compile/internal/gc/pgen_test.go | 10
-rw-r--r--  src/cmd/compile/internal/gc/plive.go | 134
-rw-r--r--  src/cmd/compile/internal/gc/range.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/reflect.go | 10
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go | 31
-rw-r--r--  src/cmd/compile/internal/gc/subr.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/typecheck.go | 12
-rw-r--r--  src/cmd/compile/internal/gc/walk.go | 39
-rw-r--r--  src/cmd/compile/internal/ssa/decompose.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/func.go | 11
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec.rules | 4
-rw-r--r--  src/cmd/compile/internal/ssa/nilcheck.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go | 4
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec.go | 7
-rw-r--r--  src/cmd/compile/internal/ssa/writebarrier.go | 2
-rw-r--r--  src/cmd/compile/internal/types/type.go | 28
-rw-r--r--  src/cmd/compile/internal/x86/387.go | 68
-rw-r--r--  src/cmd/go/internal/base/goflags.go | 17
-rw-r--r--  src/cmd/go/internal/version/version.go | 9
-rw-r--r--  src/cmd/go/internal/work/init.go | 22
-rw-r--r--  src/cmd/go/testdata/script/version.txt | 6
-rw-r--r--  src/cmd/internal/obj/s390x/objz.go | 11
-rw-r--r--  src/internal/bytealg/index_generic.go | 38
-rw-r--r--  src/internal/poll/fd_unix.go | 12
-rw-r--r--  src/runtime/chan.go | 22
-rw-r--r--  src/runtime/chan_test.go | 56
-rw-r--r--  src/runtime/export_test.go | 24
-rw-r--r--  src/runtime/internal/atomic/asm_wasm.s | 10
-rw-r--r--  src/runtime/internal/atomic/atomic_test.go | 10
-rw-r--r--  src/runtime/internal/atomic/atomic_wasm.go | 13
-rw-r--r--  src/runtime/mgcmark.go | 3
-rw-r--r--  src/runtime/mgcstack.go | 2
-rw-r--r--  src/runtime/mpagealloc.go | 13
-rw-r--r--  src/runtime/proc_test.go | 10
-rw-r--r--  src/runtime/runtime2.go | 6
-rw-r--r--  src/runtime/select.go | 19
-rw-r--r--  src/runtime/stack.go | 13
-rw-r--r--  src/runtime/stubs32.go | 2
-rw-r--r--  src/runtime/trace/trace_stack_test.go | 1
-rw-r--r--  src/testing/benchmark.go | 18
-rw-r--r--  src/testing/sub_test.go | 19
-rw-r--r--  src/testing/testing.go | 138
-rw-r--r--  test/fixedbugs/issue40629.go | 69
-rw-r--r--  test/fixedbugs/issue40954.go | 35
-rw-r--r--  test/fixedbugs/issue41711.go | 17
-rw-r--r--  test/notinheap.go | 26
-rw-r--r--  test/notinheap2.go | 48
62 files changed, 769 insertions(+), 484 deletions(-)
diff --git a/misc/cgo/test/testdata/issue41761.go b/misc/cgo/test/testdata/issue41761.go
new file mode 100644
index 0000000000..919c749251
--- /dev/null
+++ b/misc/cgo/test/testdata/issue41761.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+/*
+ typedef struct S S;
+*/
+import "C"
+
+import (
+ "cgotest/issue41761a"
+ "testing"
+)
+
+func test41761(t *testing.T) {
+ var x issue41761a.T
+ _ = (*C.struct_S)(x.X)
+}
diff --git a/misc/cgo/test/testdata/issue41761a/a.go b/misc/cgo/test/testdata/issue41761a/a.go
new file mode 100644
index 0000000000..ca5c18191e
--- /dev/null
+++ b/misc/cgo/test/testdata/issue41761a/a.go
@@ -0,0 +1,14 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue41761a
+
+/*
+ typedef struct S S;
+*/
+import "C"
+
+type T struct {
+ X *C.S
+}
diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
index aa07b9fbc1..ce52649f13 100644
--- a/src/bytes/bytes.go
+++ b/src/bytes/bytes.go
@@ -227,19 +227,26 @@ func IndexAny(s []byte, chars string) int {
continue
}
r, width = utf8.DecodeRune(s[i:])
- if r == utf8.RuneError {
- for _, r = range chars {
- if r == utf8.RuneError {
+ if r != utf8.RuneError {
+ // r is 2 to 4 bytes
+ if len(chars) == width {
+ if chars == string(r) {
return i
}
+ continue
+ }
+ // Use bytealg.IndexString for performance if available.
+ if bytealg.MaxLen >= width {
+ if bytealg.IndexString(chars, string(r)) >= 0 {
+ return i
+ }
+ continue
}
- continue
}
- // r is 2 to 4 bytes. Using strings.Index is more reasonable, but as the bytes
- // package should not import the strings package, use bytealg.IndexString
- // instead. And this does not seem to lose much performance.
- if chars == string(r) || bytealg.IndexString(chars, string(r)) >= 0 {
- return i
+ for _, ch := range chars {
+ if r == ch {
+ return i
+ }
}
}
return -1
@@ -304,19 +311,26 @@ func LastIndexAny(s []byte, chars string) int {
}
r, size := utf8.DecodeLastRune(s[:i])
i -= size
- if r == utf8.RuneError {
- for _, r = range chars {
- if r == utf8.RuneError {
+ if r != utf8.RuneError {
+ // r is 2 to 4 bytes
+ if len(chars) == size {
+ if chars == string(r) {
return i
}
+ continue
+ }
+ // Use bytealg.IndexString for performance if available.
+ if bytealg.MaxLen >= size {
+ if bytealg.IndexString(chars, string(r)) >= 0 {
+ return i
+ }
+ continue
}
- continue
}
- // r is 2 to 4 bytes. Using strings.Index is more reasonable, but as the bytes
- // package should not import the strings package, use bytealg.IndexString
- // instead. And this does not seem to lose much performance.
- if chars == string(r) || bytealg.IndexString(chars, string(r)) >= 0 {
- return i
+ for _, ch := range chars {
+ if r == ch {
+ return i
+ }
}
}
return -1
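
The rewritten loops above split the valid multi-byte-rune case three ways: when chars is exactly one rune, compare directly; when the platform's bytealg.IndexString can handle the needle (bytealg.MaxLen >= width), use it; otherwise fall back to a plain range over chars. The exported behavior is unchanged; a minimal usage sketch (byte offsets assume UTF-8):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        s := []byte("héllo")
        // 'é' (U+00E9) encodes as two bytes starting at offset 1.
        fmt.Println(bytes.IndexAny(s, "é"))      // 1
        fmt.Println(bytes.LastIndexAny(s, "lo")) // 5: the final 'o'
    }
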
diff --git a/src/cmd/addr2line/addr2line_test.go b/src/cmd/addr2line/addr2line_test.go
index 578d88e432..7973aa2fe1 100644
--- a/src/cmd/addr2line/addr2line_test.go
+++ b/src/cmd/addr2line/addr2line_test.go
@@ -73,29 +73,37 @@ func testAddr2Line(t *testing.T, exepath, addr string) {
if err != nil {
t.Fatalf("Stat failed: %v", err)
}
+
// Debug paths are stored slash-separated, so convert to system-native.
srcPath = filepath.FromSlash(srcPath)
fi2, err := os.Stat(srcPath)
- if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" && strings.HasPrefix(srcPath, gorootFinal) {
- if os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2)) {
- // srcPath has had GOROOT_FINAL substituted for GOROOT, and it doesn't
- // match the actual file. GOROOT probably hasn't been moved to its final
- // location yet, so try the original location instead.
+
+ // If GOROOT_FINAL is set and srcPath is not the file we expect, perhaps
+ // srcPath has had GOROOT_FINAL substituted for GOROOT and GOROOT hasn't been
+ // moved to its final location yet. If so, try the original location instead.
+ if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" &&
+ (os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2))) {
+ // srcPath is clean, but GOROOT_FINAL itself might not be.
+ // (See https://golang.org/issue/41447.)
+ gorootFinal = filepath.Clean(gorootFinal)
+
+ if strings.HasPrefix(srcPath, gorootFinal) {
fi2, err = os.Stat(runtime.GOROOT() + strings.TrimPrefix(srcPath, gorootFinal))
}
}
+
if err != nil {
t.Fatalf("Stat failed: %v", err)
}
if !os.SameFile(fi1, fi2) {
t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
}
- if srcLineNo != "99" {
- t.Fatalf("line number = %v; want 99", srcLineNo)
+ if srcLineNo != "107" {
+ t.Fatalf("line number = %v; want 107", srcLineNo)
}
}
-// This is line 98. The test depends on that.
+// This is line 106. The test depends on that.
func TestAddr2Line(t *testing.T) {
testenv.MustHaveGoBuild(t)
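
The key fix above is cleaning GOROOT_FINAL before the prefix check: srcPath is already clean, so an unclean GOROOT_FINAL (doubled separators and the like) would never match as a prefix (see issue 41447). A small illustration of the mismatch, with hypothetical paths:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    func main() {
        srcPath := "/usr/local/go/src/cmd/addr2line/addr2line_test.go" // already clean
        gorootFinal := "/usr/local//go"                                // unclean value from the environment

        fmt.Println(strings.HasPrefix(srcPath, gorootFinal))                 // false: raw prefix never matches
        fmt.Println(strings.HasPrefix(srcPath, filepath.Clean(gorootFinal))) // true after cleaning
    }
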
diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go
index a59534ebd0..e512d51fda 100644
--- a/src/cmd/cgo/gcc.go
+++ b/src/cmd/cgo/gcc.go
@@ -2434,6 +2434,18 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
tt := *t
tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}}
tt.Go = c.Ident("struct{}")
+ if dt.Kind == "struct" {
+ // We don't know what the representation of this struct is, so don't let
+ // anyone allocate one on the Go side. As a side effect of this annotation,
+ // pointers to this type will not be considered pointers in Go. They won't
+ // get writebarrier-ed or adjusted during a stack copy. This should handle
+ // all the cases badPointerTypedef used to handle, but hopefully will
+ // continue to work going forward without any more need for cgo changes.
+ tt.NotInHeap = true
+ // TODO: we should probably do the same for unions. Unions can't live
+ // on the Go heap, right? It currently doesn't work for unions because
+ // they are defined as a type alias for struct{}, not a defined type.
+ }
typedef[name.Name] = &tt
break
}
@@ -2504,6 +2516,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
}
t.Go = name
t.BadPointer = sub.BadPointer
+ t.NotInHeap = sub.NotInHeap
if unionWithPointer[sub.Go] {
unionWithPointer[t.Go] = true
}
@@ -2514,6 +2527,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
tt := *t
tt.Go = sub.Go
tt.BadPointer = sub.BadPointer
+ tt.NotInHeap = sub.NotInHeap
typedef[name.Name] = &tt
}
@@ -3022,6 +3036,7 @@ func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool {
// non-pointers in this type.
// TODO: Currently our best solution is to find these manually and list them as
// they come up. A better solution is desired.
+// Note: DEPRECATED. There is now a better solution. Search for NotInHeap in this file.
func (c *typeConv) badPointerTypedef(dt *dwarf.TypedefType) bool {
if c.badCFType(dt) {
return true
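
The new tests at the top of this CL exercise exactly this case: a typedef of an incomplete C struct becomes a notinheap struct{} on the Go side, so pointers to it flow freely between packages but Go code can never allocate one. A hedged sketch of what the annotation permits and rejects (the error text is taken from the walk.go changes below):

    package cgotest

    /*
    typedef struct S S; // incomplete: size and layout unknown
    */
    import "C"

    // Pointers to the incomplete type are ordinary Go values...
    var p *C.struct_S

    // ...but allocating one would now be a compile error, roughly:
    //   C.struct_S can't be allocated in Go; it is incomplete (or unallocatable)
    // var q = new(C.struct_S)
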
diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go
index 5a7bb3f87b..c72c9abcc1 100644
--- a/src/cmd/cgo/main.go
+++ b/src/cmd/cgo/main.go
@@ -151,7 +151,8 @@ type Type struct {
Go ast.Expr
EnumValues map[string]int64
Typedef string
- BadPointer bool
+ BadPointer bool // this pointer type should be represented as a uintptr (deprecated)
+ NotInHeap bool // this type should have a go:notinheap annotation
}
// A FuncType collects information about a function type in both the C and Go worlds.
@@ -169,35 +170,51 @@ func usage() {
var ptrSizeMap = map[string]int64{
"386": 4,
+ "alpha": 8,
"amd64": 8,
"arm": 4,
"arm64": 8,
+ "m68k": 4,
"mips": 4,
"mipsle": 4,
"mips64": 8,
"mips64le": 8,
+ "nios2": 4,
+ "ppc": 4,
"ppc64": 8,
"ppc64le": 8,
+ "riscv": 4,
"riscv64": 8,
"s390": 4,
"s390x": 8,
+ "sh": 4,
+ "shbe": 4,
+ "sparc": 4,
"sparc64": 8,
}
var intSizeMap = map[string]int64{
"386": 4,
+ "alpha": 8,
"amd64": 8,
"arm": 4,
"arm64": 8,
+ "m68k": 4,
"mips": 4,
"mipsle": 4,
"mips64": 8,
"mips64le": 8,
+ "nios2": 4,
+ "ppc": 4,
"ppc64": 8,
"ppc64le": 8,
+ "riscv": 4,
"riscv64": 8,
"s390": 4,
"s390x": 8,
+ "sh": 4,
+ "shbe": 4,
+ "sparc": 4,
"sparc64": 8,
}
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index 4064f0ae41..cbdb4e0f5b 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -108,6 +108,9 @@ func (p *Package) writeDefs() {
sort.Strings(typedefNames)
for _, name := range typedefNames {
def := typedef[name]
+ if def.NotInHeap {
+ fmt.Fprintf(fgo2, "//go:notinheap\n")
+ }
fmt.Fprintf(fgo2, "type %s ", name)
// We don't have source info for these types, so write them out without source info.
// Otherwise types would look like:
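
For the incomplete-struct typedefs marked in gcc.go, the generated _cgo_gotypes.go would now carry the pragma, roughly like this (the _Ctype_ name mangling is assumed here for illustration, not verbatim cgo output):

    //go:notinheap
    type _Ctype_struct_S struct{}
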
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index f3e9ab78ef..37b0ef8c52 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -377,7 +377,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
return unsafeUintptrTag
}
- if !types.Haspointers(f.Type) { // don't bother tagging for scalars
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
@@ -415,7 +415,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
}
}
- if !types.Haspointers(f.Type) { // don't bother tagging for scalars
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index 5dc755186e..08000dd374 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -326,7 +326,7 @@ func (e *Escape) stmt(n *Node) {
if typesw && n.Left.Left != nil {
cv := cas.Rlist.First()
k := e.dcl(cv) // type switch variables have no ODCL.
- if types.Haspointers(cv.Type) {
+ if cv.Type.HasPointers() {
ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
}
}
@@ -433,7 +433,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
// nop
- } else if k.derefs >= 0 && !types.Haspointers(n.Type) {
+ } else if k.derefs >= 0 && !n.Type.HasPointers() {
k = e.discardHole()
}
@@ -698,7 +698,7 @@ func (e *Escape) addr(n *Node) EscHole {
e.assignHeap(n.Right, "key of map put", n)
}
- if !types.Haspointers(n.Type) {
+ if !n.Type.HasPointers() {
k = e.discardHole()
}
@@ -811,14 +811,14 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
- if types.Haspointers(args[0].Type.Elem()) {
+ if args[0].Type.Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, args[0])
if call.IsDDD() {
appendedK := e.discardHole()
- if args[1].Type.IsSlice() && types.Haspointers(args[1].Type.Elem()) {
+ if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, args[1])
@@ -832,7 +832,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
argument(e.discardHole(), call.Left)
copiedK := e.discardHole()
- if call.Right.Type.IsSlice() && types.Haspointers(call.Right.Type.Elem()) {
+ if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
argument(copiedK, call.Right)
@@ -1029,6 +1029,9 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
if e.curfn == nil {
Fatalf("e.curfn isn't set")
}
+ if n != nil && n.Type != nil && n.Type.NotInHeap() {
+ yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
+ }
n = canonicalNode(n)
loc := &EscLocation{
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index 336e870bbd..15a84a8a43 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -32,7 +32,6 @@ package gc
import (
"cmd/compile/internal/ssa"
- "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
@@ -316,7 +315,7 @@ func ggloblnod(nam *Node) {
if nam.Name.Readonly() {
flags = obj.RODATA
}
- if nam.Type != nil && !types.Haspointers(nam.Type) {
+ if nam.Type != nil && !nam.Type.HasPointers() {
flags |= obj.NOPTR
}
Ctxt.Globl(s, nam.Type.Width, flags)
diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go
index 35b8d985cb..81eb975b08 100644
--- a/src/cmd/compile/internal/gc/iexport.go
+++ b/src/cmd/compile/internal/gc/iexport.go
@@ -492,6 +492,7 @@ func (p *iexporter) doDecl(n *Node) {
w.signature(m.Type)
}
+ w.typeExt(t)
for _, m := range ms.Slice() {
w.methExt(m)
}
@@ -1012,6 +1013,11 @@ func (w *exportWriter) symIdx(s *types.Sym) {
}
}
+func (w *exportWriter) typeExt(t *types.Type) {
+ // Export whether this type is marked notinheap.
+ w.bool(t.NotInHeap())
+}
+
// Inline bodies.
func (w *exportWriter) stmtList(list Nodes) {
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go
index 104b5fb79a..d9148eae22 100644
--- a/src/cmd/compile/internal/gc/iimport.go
+++ b/src/cmd/compile/internal/gc/iimport.go
@@ -346,6 +346,7 @@ func (r *importReader) doDecl(n *Node) {
}
t.Methods().Set(ms)
+ r.typeExt(t)
for _, m := range ms {
r.methExt(m)
}
@@ -710,6 +711,10 @@ func (r *importReader) symIdx(s *types.Sym) {
}
}
+func (r *importReader) typeExt(t *types.Type) {
+ t.SetNotInHeap(r.bool())
+}
+
func (r *importReader) doInline(n *Node) {
if len(n.Func.Inl.Body) != 0 {
Fatalf("%v already has inline body", n)
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 6b6107290a..6f7ef01bcf 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -928,7 +928,7 @@ func (o *Order) stmt(n *Node) {
n2.Ninit.Append(tmp2)
}
- r.Left = o.newTemp(r.Right.Left.Type.Elem(), types.Haspointers(r.Right.Left.Type.Elem()))
+ r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers())
tmp2 := nod(OAS, tmp1, r.Left)
tmp2 = typecheck(tmp2, ctxStmt)
n2.Ninit.Append(tmp2)
@@ -1407,7 +1407,7 @@ func (o *Order) as2(n *Node) {
left := []*Node{}
for ni, l := range n.List.Slice() {
if !l.isBlank() {
- tmp := o.newTemp(l.Type, types.Haspointers(l.Type))
+ tmp := o.newTemp(l.Type, l.Type.HasPointers())
n.List.SetIndex(ni, tmp)
tmplist = append(tmplist, tmp)
left = append(left, l)
@@ -1429,7 +1429,7 @@ func (o *Order) okAs2(n *Node) {
var tmp1, tmp2 *Node
if !n.List.First().isBlank() {
typ := n.Right.Type
- tmp1 = o.newTemp(typ, types.Haspointers(typ))
+ tmp1 = o.newTemp(typ, typ.HasPointers())
}
if !n.List.Second().isBlank() {
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 74654c86bc..662fbbda5f 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -80,8 +80,8 @@ func cmpstackvarlt(a, b *Node) bool {
return a.Name.Used()
}
- ap := types.Haspointers(a.Type)
- bp := types.Haspointers(b.Type)
+ ap := a.Type.HasPointers()
+ bp := b.Type.HasPointers()
if ap != bp {
return ap
}
@@ -176,7 +176,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
}
s.stksize += w
s.stksize = Rnd(s.stksize, int64(n.Type.Align))
- if types.Haspointers(n.Type) {
+ if n.Type.HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go
index 89b977de85..b1db29825c 100644
--- a/src/cmd/compile/internal/gc/pgen_test.go
+++ b/src/cmd/compile/internal/gc/pgen_test.go
@@ -20,7 +20,7 @@ func typeWithoutPointers() *types.Type {
func typeWithPointers() *types.Type {
t := types.New(TSTRUCT)
- f := &types.Field{Type: types.New(TPTR)}
+ f := &types.Field{Type: types.NewPtr(types.New(TINT))}
t.SetFields([]*types.Field{f})
return t
}
@@ -181,14 +181,6 @@ func TestStackvarSort(t *testing.T) {
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
}
- // haspointers updates Type.Haspointers as a side effect, so
- // exercise this function on all inputs so that reflect.DeepEqual
- // doesn't produce false positives.
- for i := range want {
- types.Haspointers(want[i].Type)
- types.Haspointers(inp[i].Type)
- }
-
sort.Sort(byStackVar(inp))
if !reflect.DeepEqual(want, inp) {
t.Error("sort failed")
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index 7e1c0c1a95..09e5487f60 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -140,24 +140,14 @@ type Liveness struct {
regMaps []liveRegMask
cache progeffectscache
-
- // These are only populated if open-coded defers are being used.
- // List of vars/stack slots storing defer args
- openDeferVars []openDeferVarInfo
- // Map from defer arg OpVarDef to the block where the OpVarDef occurs.
- openDeferVardefToBlockMap map[*Node]*ssa.Block
- // Map of blocks that cannot reach a return or exit (panic)
- nonReturnBlocks map[*ssa.Block]bool
-}
-
-type openDeferVarInfo struct {
- n *Node // Var/stack slot storing a defer arg
- varsIndex int // Index of variable in lv.vars
}
// LivenessMap maps from *ssa.Value to LivenessIndex.
type LivenessMap struct {
vals map[ssa.ID]LivenessIndex
+ // The set of live, pointer-containing variables at the deferreturn
+ // call (only set when open-coded defers are used).
+ deferreturn LivenessIndex
}
func (m *LivenessMap) reset() {
@@ -168,6 +158,7 @@ func (m *LivenessMap) reset() {
delete(m.vals, k)
}
}
+ m.deferreturn = LivenessInvalid
}
func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
@@ -268,7 +259,7 @@ func (v *varRegVec) AndNot(v1, v2 varRegVec) {
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n *Node) bool {
- return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && types.Haspointers(n.Type)
+ return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
@@ -445,7 +436,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
case ssa.LocalSlot:
return mask
case *ssa.Register:
- if ptrOnly && !v.Type.HasHeapPointer() {
+ if ptrOnly && !v.Type.HasPointers() {
return mask
}
regs[0] = loc
@@ -460,7 +451,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
if loc1 == nil {
continue
}
- if ptrOnly && !v.Type.FieldType(i).HasHeapPointer() {
+ if ptrOnly && !v.Type.FieldType(i).HasPointers() {
continue
}
regs[nreg] = loc1.(*ssa.Register)
@@ -542,7 +533,7 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
- lv.livenessMap = LivenessMap{lc.livenessMap.vals}
+ lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessInvalid}
lc.livenessMap.vals = nil
}
if lv.be == nil {
@@ -577,13 +568,13 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
+ if !t.HasPointers() {
+ // Note: this case ensures that pointers to go:notinheap types
+ // are not considered pointers by garbage collection and stack copying.
+ return
+ }
switch t.Etype {
- case TINT8, TUINT8, TINT16, TUINT16,
- TINT32, TUINT32, TINT64, TUINT64,
- TINT, TUINT, TUINTPTR, TBOOL,
- TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
-
case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
@@ -893,58 +884,12 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
func (lv *Liveness) prologue() {
lv.initcache()
- if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() {
- lv.openDeferVardefToBlockMap = make(map[*Node]*ssa.Block)
- for i, n := range lv.vars {
- if n.Name.OpenDeferSlot() {
- lv.openDeferVars = append(lv.openDeferVars, openDeferVarInfo{n: n, varsIndex: i})
- }
- }
-
- // Find any blocks that cannot reach a return or a BlockExit
- // (panic) -- these must be because of an infinite loop.
- reachesRet := make(map[ssa.ID]bool)
- blockList := make([]*ssa.Block, 0, 256)
-
- for _, b := range lv.f.Blocks {
- if b.Kind == ssa.BlockRet || b.Kind == ssa.BlockRetJmp || b.Kind == ssa.BlockExit {
- blockList = append(blockList, b)
- }
- }
-
- for len(blockList) > 0 {
- b := blockList[0]
- blockList = blockList[1:]
- if reachesRet[b.ID] {
- continue
- }
- reachesRet[b.ID] = true
- for _, e := range b.Preds {
- blockList = append(blockList, e.Block())
- }
- }
-
- lv.nonReturnBlocks = make(map[*ssa.Block]bool)
- for _, b := range lv.f.Blocks {
- if !reachesRet[b.ID] {
- lv.nonReturnBlocks[b] = true
- //fmt.Println("No reach ret", lv.f.Name, b.ID, b.Kind)
- }
- }
- }
-
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
// Walk the block instructions backward and update the block
// effects with the each prog effects.
for j := len(b.Values) - 1; j >= 0; j-- {
- if b.Values[j].Op == ssa.OpVarDef {
- n := b.Values[j].Aux.(*Node)
- if n.Name.OpenDeferSlot() {
- lv.openDeferVardefToBlockMap[n] = b
- }
- }
pos, e := lv.valueEffects(b.Values[j])
regUevar, regKill := lv.regEffects(b.Values[j])
if e&varkill != 0 {
@@ -961,20 +906,6 @@ func (lv *Liveness) prologue() {
}
}
-// markDeferVarsLive marks each variable storing an open-coded defer arg as
-// specially live in block b if the variable definition dominates block b.
-func (lv *Liveness) markDeferVarsLive(b *ssa.Block, newliveout *varRegVec) {
- // Only force computation of dominators if we have a block where we need
- // to specially mark defer args live.
- sdom := lv.f.Sdom()
- for _, info := range lv.openDeferVars {
- defB := lv.openDeferVardefToBlockMap[info.n]
- if sdom.IsAncestorEq(defB, b) {
- newliveout.vars.Set(int32(info.varsIndex))
- }
- }
-}
-
// Solve the liveness dataflow equations.
func (lv *Liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
@@ -1018,23 +949,6 @@ func (lv *Liveness) solve() {
}
}
- if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() &&
- (b.Kind == ssa.BlockExit || lv.nonReturnBlocks[b]) {
- // Open-coded defer args slots must be live
- // everywhere in a function, since a panic can
- // occur (almost) anywhere. Force all appropriate
- // defer arg slots to be live in BlockExit (panic)
- // blocks and in blocks that do not reach a return
- // (because of infinite loop).
- //
- // We are assuming that the defer exit code at
- // BlockReturn/BlockReturnJmp accesses all of the
- // defer args (with pointers), and so keeps them
- // live. This analysis may have to be adjusted if
- // that changes (because of optimizations).
- lv.markDeferVarsLive(b, &newliveout)
- }
-
if !be.liveout.Eq(newliveout) {
change = true
be.liveout.Copy(newliveout)
@@ -1087,6 +1001,17 @@ func (lv *Liveness) epilogue() {
n.Name.SetNeedzero(true)
livedefer.Set(int32(i))
}
+ if n.Name.OpenDeferSlot() {
+ // Open-coded defer args slots must be live
+ // everywhere in a function, since a panic can
+ // occur (almost) anywhere. Because it is live
+ // everywhere, it must be zeroed on entry.
+ livedefer.Set(int32(i))
+ // It was already marked as Needzero when created.
+ if !n.Name.Needzero() {
+ Fatalf("all pointer-containing defer arg slots should have Needzero set")
+ }
+ }
}
}
@@ -1188,6 +1113,17 @@ func (lv *Liveness) epilogue() {
lv.compact(b)
}
+ // If we have an open-coded deferreturn call, make a liveness map for it.
+ if lv.fn.Func.OpenCodedDeferDisallowed() {
+ lv.livenessMap.deferreturn = LivenessInvalid
+ } else {
+ lv.livenessMap.deferreturn = LivenessIndex{
+ stackMapIndex: lv.stackMapSet.add(livedefer),
+ regMapIndex: 0, // entry regMap, containing no live registers
+ isUnsafePoint: false,
+ }
+ }
+
// Done compacting. Throw out the stack map set.
lv.stackMaps = lv.stackMapSet.extractUniqe()
lv.stackMapSet = bvecSet{}
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index 1cf0a0862f..5434b0167a 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -334,7 +334,7 @@ func walkrange(n *Node) *Node {
hv1 := temp(t.Elem())
hv1.SetTypecheck(1)
- if types.Haspointers(t.Elem()) {
+ if t.Elem().HasPointers() {
init = append(init, nod(OAS, hv1, nil))
}
hb := temp(types.Types[TBOOL])
@@ -586,7 +586,7 @@ func arrayClear(n, v1, v2, a *Node) bool {
n.Nbody.Append(nod(OAS, hn, tmp))
var fn *Node
- if a.Type.Elem().HasHeapPointer() {
+ if a.Type.Elem().HasPointers() {
// memclrHasPointers(hp, hn)
Curfn.Func.setWBPos(stmt.Pos)
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index ccdb03b8a6..cc3e182c34 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -119,7 +119,7 @@ func bmap(t *types.Type) *types.Type {
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in runtime/map.go.
otyp := types.NewPtr(bucket)
- if !types.Haspointers(elemtype) && !types.Haspointers(keytype) {
+ if !elemtype.HasPointers() && !keytype.HasPointers() {
otyp = types.Types[TUINTPTR]
}
overflow := makefield("overflow", otyp)
@@ -754,7 +754,7 @@ var kinds = []int{
// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
- if !types.Haspointers(t) {
+ if !t.HasPointers() {
return 0
}
@@ -788,7 +788,7 @@ func typeptrdata(t *types.Type) int64 {
// Find the last field that has pointers.
var lastPtrField *types.Field
for _, t1 := range t.Fields().Slice() {
- if types.Haspointers(t1.Type) {
+ if t1.Type.HasPointers() {
lastPtrField = t1
}
}
@@ -1753,7 +1753,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) {
for i := range ptrmask {
ptrmask[i] = 0
}
- if !types.Haspointers(t) {
+ if !t.HasPointers() {
return
}
@@ -1822,7 +1822,7 @@ func (p *GCProg) end() {
func (p *GCProg) emit(t *types.Type, offset int64) {
dowidth(t)
- if !types.Haspointers(t) {
+ if !t.HasPointers() {
return
}
if t.Width == int64(Widthptr) {
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index d4d23a2956..3e21450deb 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -4206,7 +4206,7 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
}
- if types.Haspointers(t) {
+ if t.HasPointers() {
// Since we may use this argTemp during exit depending on the
// deferBits, we must define it unconditionally on entry.
// Therefore, we must make sure it is zeroed out in the entry
@@ -4308,22 +4308,16 @@ func (s *state) openDeferExit() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
if r.rcvrNode != nil {
- if types.Haspointers(r.rcvrNode.Type) {
+ if r.rcvrNode.Type.HasPointers() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
}
}
for _, argNode := range r.argNodes {
- if types.Haspointers(argNode.Type) {
+ if argNode.Type.HasPointers() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
}
}
- if i == len(s.openDefers)-1 {
- // Record the call of the first defer. This will be used
- // to set liveness info for the deferreturn (which is also
- // used for any location that causes a runtime panic)
- s.f.LastDeferExit = call
- }
s.endBlock()
s.startBlock(bEnd)
}
@@ -4959,7 +4953,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
s.instrument(t, left, true)
- if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
+ if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return
@@ -4971,7 +4965,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask,
// TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0.
s.storeTypeScalars(t, left, right, skip)
- if skip&skipPtr == 0 && types.Haspointers(t) {
+ if skip&skipPtr == 0 && t.HasPointers() {
s.storeTypePtrs(t, left, right)
}
}
@@ -5043,7 +5037,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
- if !types.Haspointers(ft) {
+ if !ft.HasPointers() {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
@@ -5807,11 +5801,6 @@ type SSAGenState struct {
// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
OnWasmStackSkipped int
-
- // Liveness index for the first function call in the final defer exit code
- // path that we generated. All defer functions and args should be live at
- // this point. This will be used to set the liveness for the deferreturn.
- lastDeferLiveness LivenessIndex
}
// Prog appends a new Prog.
@@ -6056,12 +6045,6 @@ func genssa(f *ssa.Func, pp *Progs) {
// instruction.
s.pp.nextLive = s.livenessMap.Get(v)
- // Remember the liveness index of the first defer call of
- // the last defer exit
- if v.Block.Func.LastDeferExit != nil && v == v.Block.Func.LastDeferExit {
- s.lastDeferLiveness = s.pp.nextLive
- }
-
// Special case for first line in function; move it to the start.
if firstPos != src.NoXPos {
s.SetPos(firstPos)
@@ -6122,7 +6105,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used to during panic
// recovery to unwind the stack and return back to the runtime.
- s.pp.nextLive = s.lastDeferLiveness
+ s.pp.nextLive = s.livenessMap.deferreturn
gencallret(pp, Deferreturn)
}
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 9362c74288..c82eefb63d 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -696,14 +696,14 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
if why != nil {
- *why = fmt.Sprintf(":\n\t%v is go:notinheap, but %v is not", dst.Elem(), src.Elem())
+ *why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
}
return OXXX
}
// (b) Disallow string to []T where T is go:notinheap.
if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
if why != nil {
- *why = fmt.Sprintf(":\n\t%v is go:notinheap", dst.Elem())
+ *why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
}
return OXXX
}
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index dec4b96fc4..98b52a506a 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -471,10 +471,10 @@ func typecheck1(n *Node, top int) (res *Node) {
return n
}
if l.Type.NotInHeap() {
- yyerror("go:notinheap map key not allowed")
+ yyerror("incomplete (or unallocatable) map key not allowed")
}
if r.Type.NotInHeap() {
- yyerror("go:notinheap map value not allowed")
+ yyerror("incomplete (or unallocatable) map value not allowed")
}
setTypeNode(n, types.NewMap(l.Type, r.Type))
@@ -491,7 +491,7 @@ func typecheck1(n *Node, top int) (res *Node) {
return n
}
if l.Type.NotInHeap() {
- yyerror("chan of go:notinheap type not allowed")
+ yyerror("chan of incomplete (or unallocatable) type not allowed")
}
setTypeNode(n, types.NewChan(l.Type, n.TChanDir()))
@@ -2068,12 +2068,6 @@ func typecheck1(n *Node, top int) (res *Node) {
ok |= ctxStmt
n.Left = typecheck(n.Left, ctxType)
checkwidth(n.Left.Type)
- if n.Left.Type != nil && n.Left.Type.NotInHeap() && n.Left.Name.Param.Pragma&NotInHeap == 0 {
- // The type contains go:notinheap types, so it
- // must be marked as such (alternatively, we
- // could silently propagate go:notinheap).
- yyerror("type %v must be go:notinheap", n.Left.Type)
- }
}
t := n.Type
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index b7cf313938..af6602f70c 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -380,9 +380,9 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
switch {
case from.Size() == 2 && from.Align == 2:
return "convT16", false
- case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
+ case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
return "convT32", false
- case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
+ case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers():
return "convT64", false
}
if sc := from.SoleComponent(); sc != nil {
@@ -396,12 +396,12 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
switch tkind {
case 'E':
- if !types.Haspointers(from) {
+ if !from.HasPointers() {
return "convT2Enoptr", true
}
return "convT2E", true
case 'I':
- if !types.Haspointers(from) {
+ if !from.HasPointers() {
return "convT2Inoptr", true
}
return "convT2I", true
@@ -640,7 +640,7 @@ opswitch:
// x = append(...)
r := n.Right
if r.Type.Elem().NotInHeap() {
- yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem())
+ yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem())
}
switch {
case isAppendOfMake(r):
@@ -1151,6 +1151,9 @@ opswitch:
}
case ONEW:
+ if n.Type.Elem().NotInHeap() {
+ yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem())
+ }
if n.Esc == EscNone {
if n.Type.Elem().Width >= maxImplicitStackVarSize {
Fatalf("large ONEW with EscNone: %v", n)
@@ -1319,6 +1322,9 @@ opswitch:
l = r
}
t := n.Type
+ if t.Elem().NotInHeap() {
+ yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
if n.Esc == EscNone {
if !isSmallMakeSlice(n) {
Fatalf("non-small OMAKESLICE with EscNone: %v", n)
@@ -1360,10 +1366,6 @@ opswitch:
// When len and cap can fit into int, use makeslice instead of
// makeslice64, which is faster and shorter on 32 bit platforms.
- if t.Elem().NotInHeap() {
- yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem())
- }
-
len, cap := l, r
fnname := "makeslice64"
@@ -1398,14 +1400,14 @@ opswitch:
t := n.Type
if t.Elem().NotInHeap() {
- Fatalf("%v is go:notinheap; heap allocation disallowed", t.Elem())
+ yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
length := conv(n.Left, types.Types[TINT])
copylen := nod(OLEN, n.Right, nil)
copyptr := nod(OSPTR, n.Right, nil)
- if !types.Haspointers(t.Elem()) && n.Bounded() {
+ if !t.Elem().HasPointers() && n.Bounded() {
// When len(to)==len(from) and elements have no pointers:
// replace make+copy with runtime.mallocgc+runtime.memmove.
@@ -2002,9 +2004,6 @@ func walkprint(nn *Node, init *Nodes) *Node {
}
func callnew(t *types.Type) *Node {
- if t.NotInHeap() {
- yyerror("%v is go:notinheap; heap allocation disallowed", t)
- }
dowidth(t)
n := nod(ONEWOBJ, typename(t), nil)
n.Type = types.NewPtr(t)
@@ -2579,7 +2578,7 @@ func mapfast(t *types.Type) int {
}
switch algtype(t.Key()) {
case AMEM32:
- if !t.Key().HasHeapPointer() {
+ if !t.Key().HasPointers() {
return mapfast32
}
if Widthptr == 4 {
@@ -2587,7 +2586,7 @@ func mapfast(t *types.Type) int {
}
Fatalf("small pointer %v", t.Key())
case AMEM64:
- if !t.Key().HasHeapPointer() {
+ if !t.Key().HasPointers() {
return mapfast64
}
if Widthptr == 8 {
@@ -2734,7 +2733,7 @@ func appendslice(n *Node, init *Nodes) *Node {
nodes.Append(nod(OAS, s, nt))
var ncopy *Node
- if elemtype.HasHeapPointer() {
+ if elemtype.HasPointers() {
// copy(s[len(l1):], l2)
nptr1 := nod(OSLICE, s, nil)
nptr1.Type = s.Type
@@ -2855,7 +2854,7 @@ func isAppendOfMake(n *Node) bool {
// s = s[:n]
// lptr := &l1[0]
// sptr := &s[0]
-// if lptr == sptr || !hasPointers(T) {
+// if lptr == sptr || !T.HasPointers() {
// // growslice did not clear the whole underlying array (or did not get called)
// hp := &s[len(l1)]
// hn := l2 * sizeof(T)
@@ -2936,7 +2935,7 @@ func extendslice(n *Node, init *Nodes) *Node {
hn = conv(hn, types.Types[TUINTPTR])
clrname := "memclrNoHeapPointers"
- hasPointers := types.Haspointers(elemtype)
+ hasPointers := elemtype.HasPointers()
if hasPointers {
clrname = "memclrHasPointers"
Curfn.Func.setWBPos(n.Pos)
@@ -3072,7 +3071,7 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
// Also works if b is a string.
//
func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
- if n.Left.Type.Elem().HasHeapPointer() {
+ if n.Left.Type.Elem().HasPointers() {
Curfn.Func.setWBPos(n.Pos)
fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
n.Left = cheapexpr(n.Left, init)
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
index c59ec4c77d..6e72e3825c 100644
--- a/src/cmd/compile/internal/ssa/decompose.go
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -139,7 +139,7 @@ func decomposeStringPhi(v *Value) {
func decomposeSlicePhi(v *Value) {
types := &v.Block.Func.Config.Types
- ptrType := types.BytePtr
+ ptrType := v.Type.Elem().PtrTo()
lenType := types.Int
ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 7cf72a8e37..4b9189fb3e 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -33,15 +33,8 @@ type Func struct {
Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
Entry *Block // the entry basic block
- // If we are using open-coded defers, this is the first call to a deferred
- // function in the final defer exit sequence that we generated. This call
- // should be after all defer statements, and will have all args, etc. of
- // all defer calls as live. The liveness info of this call will be used
- // for the deferreturn/ret segment generated for functions with open-coded
- // defers.
- LastDeferExit *Value
- bid idAlloc // block ID allocator
- vid idAlloc // value ID allocator
+ bid idAlloc // block ID allocator
+ vid idAlloc // value ID allocator
// Given an environment variable used for debug hash match,
// what file (if any) receives the yes/no logging?
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 9967c7b030..ee9ccfb41c 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -1454,7 +1454,7 @@
(XORQ x x) -> (MOVQconst [0])
(XORL x x) -> (MOVLconst [0])
-(SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c)) << uint64(d)])
+(SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c) << uint64(d))])
(SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c << uint64(d)])
(SHLQconst [d] (MOVLconst [c])) -> (MOVQconst [int64(int32(c)) << uint64(d)])
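
The SHLLconst rule previously sign-extended the 32-bit constant before shifting, which can produce a value that is not a valid sign-extended 32-bit AuxInt for MOVLconst. Shifting in 32 bits first and sign-extending the result matches what the 32-bit SHLL instruction actually computes. A worked check of the boundary case:

    package main

    import "fmt"

    func main() {
        c, d := int64(1), uint64(31)
        old := int64(int32(c)) << d   // 2147483648: does not fit a sign-extended int32
        fixed := int64(int32(c) << d) // -2147483648: SHLL's 32-bit result, sign-extended
        fmt.Println(old, fixed)
    }
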
diff --git a/src/cmd/compile/internal/ssa/gen/dec.rules b/src/cmd/compile/internal/ssa/gen/dec.rules
index 3fd2be409f..4c677f8418 100644
--- a/src/cmd/compile/internal/ssa/gen/dec.rules
+++ b/src/cmd/compile/internal/ssa/gen/dec.rules
@@ -66,14 +66,14 @@
(Load <typ.Int>
(OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
mem))
-(Store dst (SliceMake ptr len cap) mem) =>
+(Store {t} dst (SliceMake ptr len cap) mem) =>
(Store {typ.Int}
(OffPtr <typ.IntPtr> [2*config.PtrSize] dst)
cap
(Store {typ.Int}
(OffPtr <typ.IntPtr> [config.PtrSize] dst)
len
- (Store {typ.BytePtr} dst ptr mem)))
+ (Store {t.Elem().PtrTo()} dst ptr mem)))
// interface ops
(ITab (IMake itab _)) => itab
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index 6b24371ac7..d1bad529e7 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -235,7 +235,7 @@ func nilcheckelim2(f *Func) {
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
- if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasHeapPointer()) {
+ if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) {
// These ops don't really change memory.
continue
// Note: OpVarDef requires that the defined variable not have pointers.
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 20eab05e9c..72ed1eb62c 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -25454,7 +25454,7 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
return true
}
// match: (SHLLconst [d] (MOVLconst [c]))
- // result: (MOVLconst [int64(int32(c)) << uint64(d)])
+ // result: (MOVLconst [int64(int32(c) << uint64(d))])
for {
d := v.AuxInt
if v_0.Op != OpAMD64MOVLconst {
@@ -25462,7 +25462,7 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpAMD64MOVLconst)
- v.AuxInt = int64(int32(c)) << uint64(d)
+ v.AuxInt = int64(int32(c) << uint64(d))
return true
}
return false
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go
index cef781ffaa..e0fa9768d9 100644
--- a/src/cmd/compile/internal/ssa/rewritedec.go
+++ b/src/cmd/compile/internal/ssa/rewritedec.go
@@ -328,9 +328,10 @@ func rewriteValuedec_OpStore(v *Value) bool {
v.AddArg3(v0, len, v1)
return true
}
- // match: (Store dst (SliceMake ptr len cap) mem)
- // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem)))
+ // match: (Store {t} dst (SliceMake ptr len cap) mem)
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {t.Elem().PtrTo()} dst ptr mem)))
for {
+ t := auxToType(v.Aux)
dst := v_0
if v_1.Op != OpSliceMake {
break
@@ -350,7 +351,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
v2.AuxInt = int64ToAuxInt(config.PtrSize)
v2.AddArg(dst)
v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
- v3.Aux = typeToAux(typ.BytePtr)
+ v3.Aux = typeToAux(t.Elem().PtrTo())
v3.AddArg3(dst, ptr, mem)
v1.AddArg3(v2, len, v3)
v.AddArg3(v0, cap, v1)
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index c7fb059475..214798a1ab 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -31,7 +31,7 @@ func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
if !ok {
v.Fatalf("store aux is not a type: %s", v.LongString())
}
- if !t.HasHeapPointer() {
+ if !t.HasPointers() {
return false
}
if IsStackAddr(v.Args[0]) {
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 3b7b31c5d6..7d9becfa00 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -1397,14 +1397,9 @@ func (t *Type) IsUntyped() bool {
return false
}
-// TODO(austin): We probably only need HasHeapPointer. See
-// golang.org/cl/73412 for discussion.
-
-func Haspointers(t *Type) bool {
- return Haspointers1(t, false)
-}
-
-func Haspointers1(t *Type, ignoreNotInHeap bool) bool {
+// HasPointers reports whether t contains a heap pointer.
+// Note that this function ignores pointers to go:notinheap types.
+func (t *Type) HasPointers() bool {
switch t.Etype {
case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA:
@@ -1414,34 +1409,27 @@ func Haspointers1(t *Type, ignoreNotInHeap bool) bool {
if t.NumElem() == 0 { // empty array has no pointers
return false
}
- return Haspointers1(t.Elem(), ignoreNotInHeap)
+ return t.Elem().HasPointers()
case TSTRUCT:
for _, t1 := range t.Fields().Slice() {
- if Haspointers1(t1.Type, ignoreNotInHeap) {
+ if t1.Type.HasPointers() {
return true
}
}
return false
case TPTR, TSLICE:
- return !(ignoreNotInHeap && t.Elem().NotInHeap())
+ return !t.Elem().NotInHeap()
case TTUPLE:
ttup := t.Extra.(*Tuple)
- return Haspointers1(ttup.first, ignoreNotInHeap) || Haspointers1(ttup.second, ignoreNotInHeap)
+ return ttup.first.HasPointers() || ttup.second.HasPointers()
}
return true
}
-// HasHeapPointer reports whether t contains a heap pointer.
-// This is used for write barrier insertion, so it ignores
-// pointers to go:notinheap types.
-func (t *Type) HasHeapPointer() bool {
- return Haspointers1(t, true)
-}
-
func (t *Type) Symbol() *obj.LSym {
return TypeLinkSym(t)
}
@@ -1470,7 +1458,7 @@ func FakeRecvType() *Type {
}
var (
- // TSSA types. Haspointers assumes these are pointer-free.
+ // TSSA types. HasPointers assumes these are pointer-free.
TypeInvalid = newSSA("invalid")
TypeMem = newSSA("mem")
TypeFlags = newSSA("flags")
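
With Haspointers, Haspointers1, and HasHeapPointer collapsed into the single HasPointers method, every caller now gets the write-barrier semantics: pointers to go:notinheap types are invisible. A semantic sketch under that assumption (NIH stands in for a type the compiler knows as notinheap; the pragma itself is omitted):

    package illustration

    // Imagine NIH were marked go:notinheap. Then, for the compiler's
    // *types.Type values:
    //
    //   struct{ p *NIH }  -> HasPointers() == false: *NIH is neither
    //                        write-barriered nor adjusted on stack copy
    //   struct{ p *int }  -> HasPointers() == true
    type NIH struct{ x int }

    type A struct{ p *NIH }
    type B struct{ p *int }
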
diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go
index 796aa82f19..594adb2cd5 100644
--- a/src/cmd/compile/internal/x86/387.go
+++ b/src/cmd/compile/internal/x86/387.go
@@ -139,12 +139,18 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
// Set precision if needed. 64 bits is the default.
switch v.Op {
case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
- p := s.Prog(x86.AFSTCW)
+ // Save AX so we can use it as scratch space.
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
s.AddrScratch(&p.To)
- p = s.Prog(x86.AFLDCW)
- p.From.Type = obj.TYPE_MEM
- p.From.Name = obj.NAME_EXTERN
- p.From.Sym = gc.ControlWord32
+ // Install a 32-bit version of the control word.
+ installControlWord(s, gc.ControlWord32, x86.REG_AX)
+ // Restore AX.
+ p = s.Prog(x86.AMOVL)
+ s.AddrScratch(&p.From)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
}
var op obj.As
@@ -167,8 +173,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
// Restore precision if needed.
switch v.Op {
case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
- p := s.Prog(x86.AFLDCW)
- s.AddrScratch(&p.From)
+ restoreControlWord(s)
}
case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
@@ -225,19 +230,11 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
case ssa.Op386CVTTSD2SL, ssa.Op386CVTTSS2SL:
push(s, v.Args[0])
- // Save control word.
- p := s.Prog(x86.AFSTCW)
- s.AddrScratch(&p.To)
- p.To.Offset += 4
-
// Load control word which truncates (rounds towards zero).
- p = s.Prog(x86.AFLDCW)
- p.From.Type = obj.TYPE_MEM
- p.From.Name = obj.NAME_EXTERN
- p.From.Sym = gc.ControlWord64trunc
+ installControlWord(s, gc.ControlWord64trunc, v.Reg())
// Now do the conversion.
- p = s.Prog(x86.AFMOVLP)
+ p := s.Prog(x86.AFMOVLP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
s.AddrScratch(&p.To)
@@ -247,9 +244,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
// Restore control word.
- p = s.Prog(x86.AFLDCW)
- s.AddrScratch(&p.From)
- p.From.Offset += 4
+ restoreControlWord(s)
case ssa.Op386CVTSS2SD:
// float32 -> float64 is a nop
@@ -373,3 +368,36 @@ func ssaGenBlock387(s *gc.SSAGenState, b, next *ssa.Block) {
ssaGenBlock(s, b, next)
}
+
+// installControlWord saves the current floating-point control
+// word and installs a new one loaded from cw.
+// scratchReg must be an unused register.
+// This call must be paired with restoreControlWord.
+// Bytes 4-5 of the scratch space (s.AddrScratch) are used between
+// this call and restoreControlWord.
+func installControlWord(s *gc.SSAGenState, cw *obj.LSym, scratchReg int16) {
+ // Save current control word.
+ p := s.Prog(x86.AFSTCW)
+ s.AddrScratch(&p.To)
+ p.To.Offset += 4
+
+ // Materialize address of new control word.
+ // Note: this must be a separate instruction to handle PIE correctly.

+ // See issue 41503.
+ p = s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = cw
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = scratchReg
+
+ // Load replacement control word.
+ p = s.Prog(x86.AFLDCW)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = scratchReg
+}
+func restoreControlWord(s *gc.SSAGenState) {
+ p := s.Prog(x86.AFLDCW)
+ s.AddrScratch(&p.From)
+ p.From.Offset += 4
+}
diff --git a/src/cmd/go/internal/base/goflags.go b/src/cmd/go/internal/base/goflags.go
index 34766134b0..f29cc3cf9a 100644
--- a/src/cmd/go/internal/base/goflags.go
+++ b/src/cmd/go/internal/base/goflags.go
@@ -153,3 +153,20 @@ func SetFromGOFLAGS(flags *flag.FlagSet) {
}
}
}
+
+// InGOFLAGS returns whether GOFLAGS contains the given flag, such as "-mod".
+func InGOFLAGS(flag string) bool {
+ for _, goflag := range GOFLAGS() {
+ name := goflag
+ if strings.HasPrefix(name, "--") {
+ name = name[1:]
+ }
+ if i := strings.Index(name, "="); i >= 0 {
+ name = name[:i]
+ }
+ if name == flag {
+ return true
+ }
+ }
+ return false
+}
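
InGOFLAGS treats "--flag" and "-flag" as the same flag and ignores any "=value" suffix. A standalone sketch of that normalization, mirroring the loop above against a fixed list:

    package main

    import (
        "fmt"
        "strings"
    )

    // inGoflags mirrors base.InGOFLAGS, purely to illustrate the
    // normalization: "--flag" collapses to "-flag" and "=value" is cut.
    func inGoflags(goflags []string, flag string) bool {
        for _, goflag := range goflags {
            name := goflag
            if strings.HasPrefix(name, "--") {
                name = name[1:]
            }
            if i := strings.Index(name, "="); i >= 0 {
                name = name[:i]
            }
            if name == flag {
                return true
            }
        }
        return false
    }

    func main() {
        gf := []string{"--mod=vendor", "-v"}
        fmt.Println(inGoflags(gf, "-mod")) // true: "--mod=vendor" normalizes to "-mod"
        fmt.Println(inGoflags(gf, "-v"))   // true
        fmt.Println(inGoflags(gf, "-m"))   // false
    }
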
diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go
index ac2ae50155..a5b13653cd 100644
--- a/src/cmd/go/internal/version/version.go
+++ b/src/cmd/go/internal/version/version.go
@@ -53,7 +53,14 @@ var (
func runVersion(cmd *base.Command, args []string) {
if len(args) == 0 {
- if *versionM || *versionV {
+ // If any of this command's flags were passed explicitly, error
+ // out, because they only make sense with arguments.
+ //
+ // Don't error if the flags came from GOFLAGS, since that can be
+ // a reasonable use case. For example, imagine GOFLAGS=-v to
+ // turn "verbose mode" on for all Go commands, which should not
+ // break "go version".
+ if (!base.InGOFLAGS("-m") && *versionM) || (!base.InGOFLAGS("-v") && *versionV) {
fmt.Fprintf(os.Stderr, "go version: flags can only be used with arguments\n")
base.SetExitStatus(2)
return
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
index dad3b10111..c168364cd1 100644
--- a/src/cmd/go/internal/work/init.go
+++ b/src/cmd/go/internal/work/init.go
@@ -254,34 +254,18 @@ func buildModeInit() {
case "":
// ok
case "readonly", "vendor", "mod":
- if !cfg.ModulesEnabled && !inGOFLAGS("-mod") {
+ if !cfg.ModulesEnabled && !base.InGOFLAGS("-mod") {
base.Fatalf("build flag -mod=%s only valid when using modules", cfg.BuildMod)
}
default:
base.Fatalf("-mod=%s not supported (can be '', 'mod', 'readonly', or 'vendor')", cfg.BuildMod)
}
if !cfg.ModulesEnabled {
- if cfg.ModCacheRW && !inGOFLAGS("-modcacherw") {
+ if cfg.ModCacheRW && !base.InGOFLAGS("-modcacherw") {
base.Fatalf("build flag -modcacherw only valid when using modules")
}
- if cfg.ModFile != "" && !inGOFLAGS("-mod") {
+ if cfg.ModFile != "" && !base.InGOFLAGS("-mod") {
base.Fatalf("build flag -modfile only valid when using modules")
}
}
}
-
-func inGOFLAGS(flag string) bool {
- for _, goflag := range base.GOFLAGS() {
- name := goflag
- if strings.HasPrefix(name, "--") {
- name = name[1:]
- }
- if i := strings.Index(name, "="); i >= 0 {
- name = name[:i]
- }
- if name == flag {
- return true
- }
- }
- return false
-}
diff --git a/src/cmd/go/testdata/script/version.txt b/src/cmd/go/testdata/script/version.txt
index 0123ac6d53..87cb6befe9 100644
--- a/src/cmd/go/testdata/script/version.txt
+++ b/src/cmd/go/testdata/script/version.txt
@@ -9,6 +9,12 @@ stderr 'with arguments'
! go version -v
stderr 'with arguments'
+# Neither of the two flags above should be an issue via GOFLAGS.
+env GOFLAGS='-m -v'
+go version
+stdout '^go version'
+env GOFLAGS=
+
env GO111MODULE=on
# Skip the builds below if we are running in short mode.
[short] skip
diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go
index b14dc810fa..ef6335d849 100644
--- a/src/cmd/internal/obj/s390x/objz.go
+++ b/src/cmd/internal/obj/s390x/objz.go
@@ -283,17 +283,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
ACMPUBNE:
q = p
p.Mark |= BRANCH
- if p.Pcond != nil {
- q := p.Pcond
- for q.As == obj.ANOP {
- q = q.Link
- p.Pcond = q
- }
- }
-
- case obj.ANOP:
- q.Link = p.Link /* q is non-nop */
- p.Link.Mark |= p.Mark
default:
q = p
diff --git a/src/internal/bytealg/index_generic.go b/src/internal/bytealg/index_generic.go
index 83345f1013..98e859f925 100644
--- a/src/internal/bytealg/index_generic.go
+++ b/src/internal/bytealg/index_generic.go
@@ -16,42 +16,8 @@ func Index(a, b []byte) int {
// IndexString returns the index of the first instance of b in a, or -1 if b is not present in a.
// Requires 2 <= len(b) <= MaxLen.
-func IndexString(s, substr string) int {
- // This is a partial copy of strings.Index, here because bytes.IndexAny and bytes.LastIndexAny
- // call bytealg.IndexString. Some platforms have an optimized assembly version of this function.
- // This implementation is used for those that do not. Although the pure Go implementation here
- // works for the case of len(b) > MaxLen, we do not require that its assembly implementation also
- // supports the case of len(b) > MaxLen. And we do not guarantee that this function supports the
- // case of len(b) > MaxLen.
- n := len(substr)
- c0 := substr[0]
- c1 := substr[1]
- i := 0
- t := len(s) - n + 1
- fails := 0
- for i < t {
- if s[i] != c0 {
- o := IndexByteString(s[i:t], c0)
- if o < 0 {
- return -1
- }
- i += o
- }
- if s[i+1] == c1 && s[i:i+n] == substr {
- return i
- }
- i++
- fails++
- if fails >= 4+i>>4 && i < t {
- // See comment in src/bytes/bytes.go.
- j := IndexRabinKarp(s[i:], substr)
- if j < 0 {
- return -1
- }
- return i + j
- }
- }
- return -1
+func IndexString(a, b string) int {
+ panic("unimplemented")
}
// Cutover reports the number of failures of IndexByte we should tolerate
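
With the fallback gone, platforms that lack an optimized assembly IndexString leave MaxLen at 0, and the accompanying bytes.go changes in this release should make callers check MaxLen before calling in, so the panic is unreachable in practice. For reference, the deleted code is the classic two-byte-anchored scan; a trimmed, runnable sketch of that search (minus the Rabin-Karp bailout), assuming 2 <= len(substr) <= len(s):

package main

import (
	"fmt"
	"strings"
)

// indexString anchors on the first byte with IndexByte, confirms the
// second byte, then compares the whole candidate, as the deleted code
// did. The real version also bailed out to Rabin-Karp after too many
// failed candidates.
func indexString(s, substr string) int {
	n := len(substr)
	c0, c1 := substr[0], substr[1]
	for i, t := 0, len(s)-n+1; i < t; i++ {
		if s[i] != c0 {
			o := strings.IndexByte(s[i:t], c0)
			if o < 0 {
				return -1
			}
			i += o
		}
		if s[i+1] == c1 && s[i:i+n] == substr {
			return i
		}
	}
	return -1
}

func main() {
	fmt.Println(indexString("channel parking", "park")) // 8
}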
diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go
index 4872fa9851..61ffa82b14 100644
--- a/src/internal/poll/fd_unix.go
+++ b/src/internal/poll/fd_unix.go
@@ -152,7 +152,7 @@ func (fd *FD) Read(p []byte) (int, error) {
p = p[:maxRW]
}
for {
- n, err := ignoringEINTR(syscall.Read, fd.Sysfd, p)
+ n, err := ignoringEINTR(func() (int, error) { return syscall.Read(fd.Sysfd, p) })
if err != nil {
n = 0
if err == syscall.EAGAIN && fd.pd.pollable() {
@@ -264,7 +264,7 @@ func (fd *FD) Write(p []byte) (int, error) {
if fd.IsStream && max-nn > maxRW {
max = nn + maxRW
}
- n, err := ignoringEINTR(syscall.Write, fd.Sysfd, p[nn:max])
+ n, err := ignoringEINTR(func() (int, error) { return syscall.Write(fd.Sysfd, p[nn:max]) })
if n > 0 {
nn += n
}
@@ -423,7 +423,7 @@ func (fd *FD) ReadDirent(buf []byte) (int, error) {
}
defer fd.decref()
for {
- n, err := ignoringEINTR(syscall.ReadDirent, fd.Sysfd, buf)
+ n, err := ignoringEINTR(func() (int, error) { return syscall.ReadDirent(fd.Sysfd, buf) })
if err != nil {
n = 0
if err == syscall.EAGAIN && fd.pd.pollable() {
@@ -514,7 +514,7 @@ func (fd *FD) WriteOnce(p []byte) (int, error) {
return 0, err
}
defer fd.writeUnlock()
- return ignoringEINTR(syscall.Write, fd.Sysfd, p)
+ return ignoringEINTR(func() (int, error) { return syscall.Write(fd.Sysfd, p) })
}
// RawRead invokes the user-defined function f for a read operation.
@@ -562,9 +562,9 @@ func (fd *FD) RawWrite(f func(uintptr) bool) error {
// installed without setting SA_RESTART. None of these are the common case,
// but there are enough of them that it seems that we can't avoid
// an EINTR loop.
-func ignoringEINTR(fn func(fd int, p []byte) (int, error), fd int, p []byte) (int, error) {
+func ignoringEINTR(fn func() (int, error)) (int, error) {
for {
- n, err := fn(fd, p)
+ n, err := fn()
if err != syscall.EINTR {
return n, err
}
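
The retry helper now takes a closure instead of a read-shaped function plus its arguments, so one loop can wrap any call returning (int, error). A standalone sketch of the pattern, assuming a Unix-like target where syscall.Read has this shape:

package main

import (
	"fmt"
	"syscall"
)

// ignoringEINTR retries fn until it returns anything other than
// EINTR, mirroring the loop above.
func ignoringEINTR(fn func() (int, error)) (int, error) {
	for {
		n, err := fn()
		if err != syscall.EINTR {
			return n, err
		}
	}
}

func main() {
	buf := make([]byte, 64)
	// The closure captures fd 0 and buf; no syscall-specific
	// parameters leak into the retry helper.
	n, err := ignoringEINTR(func() (int, error) {
		return syscall.Read(0, buf)
	})
	fmt.Println(n, err)
}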
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index f6f4ffd02e..d5daa4b13d 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -250,6 +250,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
gp.waiting = mysg
gp.param = nil
c.sendq.enqueue(mysg)
+ // Signal to anyone trying to shrink our stack that we're about
+ // to park on a channel. The window between when this G's status
+ // changes and when we set gp.activeStackChans is not safe for
+ // stack shrinking.
+ atomic.Store8(&gp.parkingOnChan, 1)
gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
// Ensure the value being sent is kept alive until the
// receiver copies it out. The sudog has a pointer to the
@@ -564,6 +569,11 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
mysg.c = c
gp.param = nil
c.recvq.enqueue(mysg)
+ // Signal to anyone trying to shrink our stack that we're about
+ // to park on a channel. The window between when this G's status
+ // changes and when we set gp.activeStackChans is not safe for
+ // stack shrinking.
+ atomic.Store8(&gp.parkingOnChan, 1)
gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
// someone woke us up
@@ -641,7 +651,19 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
// There are unlocked sudogs that point into gp's stack. Stack
// copying must lock the channels of those sudogs.
+ // Set activeStackChans here instead of before we try parking
+ // because we could self-deadlock in stack growth on the
+ // channel lock.
gp.activeStackChans = true
+ // Mark that it's safe for stack shrinking to occur now,
+ // because any thread acquiring this G's stack for shrinking
+ // is guaranteed to observe activeStackChans after this store.
+ atomic.Store8(&gp.parkingOnChan, 0)
+ // Make sure we unlock after setting activeStackChans and
+ // unsetting parkingOnChan. The moment we unlock chanLock
+ // we risk gp getting readied by a channel operation and
+ // so gp could continue running before everything before
+ // the unlock is visible (even to gp itself).
unlock((*mutex)(chanLock))
return true
}
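
The two stores bracket the unsafe window: parkingOnChan goes up before the G's status changes and comes down only once activeStackChans is visible, and isShrinkStackSafe (see the stack.go hunk below) refuses to shrink while it is up. A schematic, runnable sketch of the handshake, using sync/atomic in place of the runtime-internal atomics:

package main

import (
	"fmt"
	"sync/atomic"
)

// g models just the two fields involved in the handshake.
type g struct {
	parkingOnChan    uint32 // raised while parking on a channel
	activeStackChans bool   // sudogs point into the stack
}

// parkOnChan mirrors chansend/chanrecv plus chanparkcommit,
// heavily simplified: the real code runs under the channel lock
// and the goroutine status machinery.
func parkOnChan(gp *g) {
	atomic.StoreUint32(&gp.parkingOnChan, 1) // window opens
	// ...G status changes; a shrinker could observe us here...
	gp.activeStackChans = true               // commit: locks now required
	atomic.StoreUint32(&gp.parkingOnChan, 0) // window closes
}

// isShrinkStackSafe mirrors the new check in stack.go.
func isShrinkStackSafe(gp *g) bool {
	return atomic.LoadUint32(&gp.parkingOnChan) == 0
}

func main() {
	gp := &g{}
	fmt.Println(isShrinkStackSafe(gp)) // true before parking
	parkOnChan(gp)
	fmt.Println(isShrinkStackSafe(gp), gp.activeStackChans) // true true
}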
diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go
index 039a086e9b..756bbbeccf 100644
--- a/src/runtime/chan_test.go
+++ b/src/runtime/chan_test.go
@@ -623,6 +623,62 @@ func TestShrinkStackDuringBlockedSend(t *testing.T) {
<-done
}
+func TestNoShrinkStackWhileParking(t *testing.T) {
+ // The goal of this test is to trigger a "racy sudog adjustment"
+ // throw. Basically, there's a window between when a goroutine
+ // becomes available for preemption for stack scanning (and thus,
+ // stack shrinking) but before the goroutine has fully parked on a
+ // channel. See issue 40641 for more details on the problem.
+ //
+ // The way we try to induce this failure is to set up two
+	// goroutines: a sender and a receiver that communicate across
+ // a channel. We try to set up a situation where the sender
+ // grows its stack temporarily then *fully* blocks on a channel
+ // often. Meanwhile a GC is triggered so that we try to get a
+ // mark worker to shrink the sender's stack and race with the
+ // sender parking.
+ //
+ // Unfortunately the race window here is so small that we
+ // either need a ridiculous number of iterations, or we add
+ // "usleep(1000)" to park_m, just before the unlockf call.
+ const n = 10
+ send := func(c chan<- int, done chan struct{}) {
+ for i := 0; i < n; i++ {
+ c <- i
+ // Use lots of stack briefly so that
+ // the GC is going to want to shrink us
+ // when it scans us. Make sure not to
+ // do any function calls otherwise
+ // in order to avoid us shrinking ourselves
+ // when we're preempted.
+ stackGrowthRecursive(20)
+ }
+ done <- struct{}{}
+ }
+ recv := func(c <-chan int, done chan struct{}) {
+ for i := 0; i < n; i++ {
+ // Sleep here so that the sender always
+ // fully blocks.
+ time.Sleep(10 * time.Microsecond)
+ <-c
+ }
+ done <- struct{}{}
+ }
+ for i := 0; i < n*20; i++ {
+ c := make(chan int)
+ done := make(chan struct{})
+ go recv(c, done)
+ go send(c, done)
+ // Wait a little bit before triggering
+ // the GC to make sure the sender and
+	// receiver have gotten into their groove.
+ time.Sleep(50 * time.Microsecond)
+ runtime.GC()
+ <-done
+ <-done
+ }
+}
+
func TestSelectDuplicateChannel(t *testing.T) {
// This test makes sure we can queue a G on
// the same channel multiple times.
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 5ab03f3f99..b268d5a058 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -360,7 +360,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
}
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+ chunk := mheap_.pages.tryChunkOf(i)
+ if chunk == nil {
+ continue
+ }
+ pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
slow.HeapReleased += uint64(pg) * pageSize
}
for _, p := range allp {
@@ -758,11 +762,7 @@ func (p *PageAlloc) InUse() []AddrRange {
// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
ci := chunkIdx(i)
- l2 := (*pageAlloc)(p).chunks[ci.l1()]
- if l2 == nil {
- return nil
- }
- return (*PallocData)(&l2[ci.l2()])
+ return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}
// AddrRange represents a range over addresses.
@@ -902,7 +902,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
lock(&mheap_.lock)
chunkLoop:
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- chunk := mheap_.pages.chunkOf(i)
+ chunk := mheap_.pages.tryChunkOf(i)
+ if chunk == nil {
+ continue
+ }
for j := 0; j < pallocChunkPages/64; j++ {
// Run over each 64-bit bitmap section and ensure
// scavenged is being cleared properly on allocation.
@@ -983,9 +986,8 @@ func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
}
func MSpanCountAlloc(bits []byte) int {
- s := mspan{
- nelems: uintptr(len(bits) * 8),
- gcmarkBits: (*gcBits)(unsafe.Pointer(&bits[0])),
- }
+ s := (*mspan)(mheap_.spanalloc.alloc())
+ s.nelems = uintptr(len(bits) * 8)
+ s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
return s.countAlloc()
}
diff --git a/src/runtime/internal/atomic/asm_wasm.s b/src/runtime/internal/atomic/asm_wasm.s
new file mode 100644
index 0000000000..7c33cb1ee9
--- /dev/null
+++ b/src/runtime/internal/atomic/asm_wasm.s
@@ -0,0 +1,10 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
+ MOVD ptr+0(FP), R0
+ MOVD val+8(FP), 0(R0)
+ RET
diff --git a/src/runtime/internal/atomic/atomic_test.go b/src/runtime/internal/atomic/atomic_test.go
index 0c1125c558..b0a8fa0610 100644
--- a/src/runtime/internal/atomic/atomic_test.go
+++ b/src/runtime/internal/atomic/atomic_test.go
@@ -220,3 +220,13 @@ func TestBitwiseContended(t *testing.T) {
}
}
}
+
+func TestStorepNoWB(t *testing.T) {
+ var p [2]*int
+ for i := range p {
+ atomic.StorepNoWB(unsafe.Pointer(&p[i]), unsafe.Pointer(new(int)))
+ }
+ if p[0] == p[1] {
+ t.Error("Bad escape analysis of StorepNoWB")
+ }
+}
diff --git a/src/runtime/internal/atomic/atomic_wasm.go b/src/runtime/internal/atomic/atomic_wasm.go
index 9037c2f7c8..2c0c3a8174 100644
--- a/src/runtime/internal/atomic/atomic_wasm.go
+++ b/src/runtime/internal/atomic/atomic_wasm.go
@@ -153,14 +153,11 @@ func Store64(ptr *uint64, val uint64) {
*ptr = val
}
-//go:notinheap
-type noWB struct{}
-
-//go:noinline
-//go:nosplit
-func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) {
- *(**noWB)(ptr) = (*noWB)(val)
-}
+// StorepNoWB performs *ptr = val atomically and without a write
+// barrier.
+//
+// NO go:noescape annotation; see atomic_pointer.go.
+func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
//go:nosplit
//go:noinline
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index fe988c46d9..b2fbcd4105 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -937,7 +937,8 @@ func scanstack(gp *g, gcw *gcWork) {
x := state.head
state.head = x.next
if stackTraceDebug {
- for _, obj := range x.obj[:x.nobj] {
+ for i := 0; i < x.nobj; i++ {
+ obj := &x.obj[i]
if obj.typ == nil { // reachable
continue
}
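
Indexing with &x.obj[i] hands the body a pointer into the buffer, where ranging by value would copy each stackObject out. A minimal illustration of the difference in ordinary Go (not runtime code):

package main

import "fmt"

type item struct{ n int }

func main() {
	buf := [4]item{{1}, {2}, {3}, {4}}

	for _, it := range buf { // it is a copy of each element
		it.n = 0 // mutates the copy only
	}
	fmt.Println(buf) // [{1} {2} {3} {4}]

	for i := 0; i < len(buf); i++ {
		it := &buf[i] // pointer into buf, no copy
		it.n = 0
	}
	fmt.Println(buf) // [{0} {0} {0} {0}]
}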
diff --git a/src/runtime/mgcstack.go b/src/runtime/mgcstack.go
index 211d882fa6..8eb941a328 100644
--- a/src/runtime/mgcstack.go
+++ b/src/runtime/mgcstack.go
@@ -167,8 +167,6 @@ func (obj *stackObject) setType(typ *_type) {
// A stackScanState keeps track of the state used during the GC walk
// of a goroutine.
-//
-//go:notinheap
type stackScanState struct {
cache pcvalueCache
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 8b3c62c375..c90a6378bd 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
s.scav.scavLWM = maxSearchAddr
}
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+ l2 := s.chunks[ci.l1()]
+ if l2 == nil {
+ return nil
+ }
+ return &l2[ci.l2()]
+}
+
// chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
return &s.chunks[ci.l1()][ci.l2()]
}
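
tryChunkOf factors out the L1 nil check that export_test.go previously open-coded, so tests walking [start, end) no longer fault on unmapped chunks. The shape generalizes to any two-level sparse array; a standalone sketch with illustrative names:

package main

import "fmt"

const l2Len = 4

// sparse mimics the pageAlloc chunks layout: a first level of
// pointers to lazily allocated second-level arrays.
type sparse struct {
	l1 [8]*[l2Len]int
}

// tryAt is the analogue of tryChunkOf: nil when the second level
// has not been mapped, a usable pointer otherwise.
func (s *sparse) tryAt(i int) *int {
	l2 := s.l1[i/l2Len]
	if l2 == nil {
		return nil
	}
	return &l2[i%l2Len]
}

func main() {
	var s sparse
	fmt.Println(s.tryAt(5) == nil) // true: not mapped yet
	s.l1[1] = new([l2Len]int)
	fmt.Println(*s.tryAt(5)) // 0: safely addressable now
}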
diff --git a/src/runtime/proc_test.go b/src/runtime/proc_test.go
index de4dec36ce..767bde15b4 100644
--- a/src/runtime/proc_test.go
+++ b/src/runtime/proc_test.go
@@ -523,9 +523,17 @@ func BenchmarkPingPongHog(b *testing.B) {
<-done
}
+var padData [128]uint64
+
func stackGrowthRecursive(i int) {
var pad [128]uint64
- if i != 0 && pad[0] == 0 {
+ pad = padData
+ for j := range pad {
+ if pad[j] != 0 {
+ return
+ }
+ }
+ if i != 0 {
stackGrowthRecursive(i - 1)
}
}
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index cffdb0bf27..814364aa42 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -447,6 +447,10 @@ type g struct {
// copying needs to acquire channel locks to protect these
// areas of the stack.
activeStackChans bool
+ // parkingOnChan indicates that the goroutine is about to
+ // park on a chansend or chanrecv. Used to signal an unsafe point
+ // for stack shrinking. It's a boolean value, but is updated atomically.
+ parkingOnChan uint8
raceignore int8 // ignore race detection events
sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
@@ -914,8 +918,6 @@ type _defer struct {
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
-//
-//go:notinheap
type _panic struct {
argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
arg interface{} // argument to panic
diff --git a/src/runtime/select.go b/src/runtime/select.go
index a069e3e050..69d255712a 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -7,6 +7,7 @@ package runtime
// This file contains the implementation of Go select statements.
import (
+ "runtime/internal/atomic"
"unsafe"
)
@@ -77,7 +78,20 @@ func selunlock(scases []scase, lockorder []uint16) {
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
// There are unlocked sudogs that point into gp's stack. Stack
// copying must lock the channels of those sudogs.
+ // Set activeStackChans here instead of before we try parking
+ // because we could self-deadlock in stack growth on a
+ // channel lock.
gp.activeStackChans = true
+ // Mark that it's safe for stack shrinking to occur now,
+ // because any thread acquiring this G's stack for shrinking
+ // is guaranteed to observe activeStackChans after this store.
+ atomic.Store8(&gp.parkingOnChan, 0)
+ // Make sure we unlock after setting activeStackChans and
+ // unsetting parkingOnChan. The moment we unlock any of the
+ // channel locks we risk gp getting readied by a channel operation
+ // and so gp could continue running before everything before the
+ // unlock is visible (even to gp itself).
+
// This must not access gp's stack (see gopark). In
// particular, it must not access the *hselect. That's okay,
// because by the time this is called, gp.waiting has all
@@ -316,6 +330,11 @@ loop:
// wait for someone to wake us up
gp.param = nil
+ // Signal to anyone trying to shrink our stack that we're about
+ // to park on a channel. The window between when this G's status
+ // changes and when we set gp.activeStackChans is not safe for
+ // stack shrinking.
+ atomic.Store8(&gp.parkingOnChan, 1)
gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
gp.activeStackChans = false
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 0e930f60db..f38ba56c80 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -864,6 +864,13 @@ func copystack(gp *g, newsize uintptr) {
// Adjust sudogs, synchronizing with channel ops if necessary.
ncopy := used
if !gp.activeStackChans {
+ if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
+ // It's not safe for someone to shrink this stack while we're actively
+ // parking on a channel, but it is safe to grow since we do that
+ // ourselves and explicitly don't want to synchronize with channels
+ // since we could self-deadlock.
+ throw("racy sudog adjustment due to parking on channel")
+ }
adjustsudogs(gp, &adjinfo)
} else {
// sudogs may be pointing in to the stack and gp has
@@ -1103,7 +1110,11 @@ func isShrinkStackSafe(gp *g) bool {
// We also can't copy the stack if we're at an asynchronous
// safe-point because we don't have precise pointer maps for
// all frames.
- return gp.syscallsp == 0 && !gp.asyncSafePoint
+ //
+ // We also can't *shrink* the stack in the window between the
+ // goroutine calling gopark to park on a channel and
+ // gp.activeStackChans being set.
+ return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
}
// Maybe shrink the stack being used by gp.
diff --git a/src/runtime/stubs32.go b/src/runtime/stubs32.go
index a7f52f6b9e..c4715fe989 100644
--- a/src/runtime/stubs32.go
+++ b/src/runtime/stubs32.go
@@ -11,4 +11,4 @@ import "unsafe"
// Declarations for runtime services implemented in C or assembly that
// are only present on 32 bit systems.
-func call16(fn, arg unsafe.Pointer, n, retoffset uint32)
+func call16(typ, fn, arg unsafe.Pointer, n, retoffset uint32)
diff --git a/src/runtime/trace/trace_stack_test.go b/src/runtime/trace/trace_stack_test.go
index cfc0419b72..f856fdcd01 100644
--- a/src/runtime/trace/trace_stack_test.go
+++ b/src/runtime/trace/trace_stack_test.go
@@ -252,6 +252,7 @@ func TestTraceSymbolize(t *testing.T) {
{trace.EvGoSysCall, []frame{
{"syscall.read", 0},
{"syscall.Read", 0},
+ {"internal/poll.(*FD).Read.func1", 0},
{"internal/poll.ignoringEINTR", 0},
{"internal/poll.(*FD).Read", 0},
{"os.(*File).read", 0},
diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
index 52766005bf..e9687bf26d 100644
--- a/src/testing/benchmark.go
+++ b/src/testing/benchmark.go
@@ -242,7 +242,7 @@ func (b *B) run1() bool {
if b.skipped {
tag = "SKIP"
}
- if b.chatty && (len(b.output) > 0 || b.finished) {
+ if b.chatty != nil && (len(b.output) > 0 || b.finished) {
b.trimOutput()
fmt.Fprintf(b.w, "--- %s: %s\n%s", tag, b.name, b.output)
}
@@ -523,10 +523,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
}
main := &B{
common: common{
- name: "Main",
- w: os.Stdout,
- chatty: *chatty,
- bench: true,
+ name: "Main",
+ w: os.Stdout,
+ bench: true,
},
importPath: importPath,
benchFunc: func(b *B) {
@@ -537,6 +536,9 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
benchTime: benchTime,
context: ctx,
}
+ if Verbose() {
+ main.chatty = newChattyPrinter(main.w)
+ }
main.runN(1)
return !main.failed
}
@@ -549,7 +551,7 @@ func (ctx *benchContext) processBench(b *B) {
benchName := benchmarkName(b.name, procs)
// If it's chatty, we've already printed this information.
- if !b.chatty {
+ if b.chatty == nil {
fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
}
// Recompute the running time for all but the first iteration.
@@ -576,7 +578,7 @@ func (ctx *benchContext) processBench(b *B) {
continue
}
results := r.String()
- if b.chatty {
+ if b.chatty != nil {
fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
}
if *benchmarkMemory || b.showAllocResult {
@@ -639,7 +641,7 @@ func (b *B) Run(name string, f func(b *B)) bool {
atomic.StoreInt32(&sub.hasSub, 1)
}
- if b.chatty {
+ if b.chatty != nil {
labelsOnce.Do(func() {
fmt.Printf("goos: %s\n", runtime.GOOS)
fmt.Printf("goarch: %s\n", runtime.GOARCH)
diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go
index 8eb0084b1c..5ed3fc4afe 100644
--- a/src/testing/sub_test.go
+++ b/src/testing/sub_test.go
@@ -483,10 +483,12 @@ func TestTRun(t *T) {
signal: make(chan bool),
name: "Test",
w: buf,
- chatty: tc.chatty,
},
context: ctx,
}
+ if tc.chatty {
+ root.chatty = newChattyPrinter(root.w)
+ }
ok := root.Run(tc.desc, tc.f)
ctx.release()
@@ -665,11 +667,13 @@ func TestBRun(t *T) {
signal: make(chan bool),
name: "root",
w: buf,
- chatty: tc.chatty,
},
benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
benchTime: benchTimeFlag{d: 1 * time.Microsecond},
}
+ if tc.chatty {
+ root.chatty = newChattyPrinter(root.w)
+ }
root.runN(1)
if ok != !tc.failed {
t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed)
@@ -741,9 +745,13 @@ func TestParallelSub(t *T) {
}
}
-type funcWriter func([]byte) (int, error)
+type funcWriter struct {
+ write func([]byte) (int, error)
+}
-func (fw funcWriter) Write(b []byte) (int, error) { return fw(b) }
+func (fw *funcWriter) Write(b []byte) (int, error) {
+ return fw.write(b)
+}
func TestRacyOutput(t *T) {
var runs int32 // The number of running Writes
@@ -761,9 +769,10 @@ func TestRacyOutput(t *T) {
var wg sync.WaitGroup
root := &T{
- common: common{w: funcWriter(raceDetector), chatty: true},
+ common: common{w: &funcWriter{raceDetector}},
context: newTestContext(1, newMatcher(regexp.MatchString, "", "")),
}
+ root.chatty = newChattyPrinter(root.w)
root.Run("", func(t *T) {
for i := 0; i < 100; i++ {
wg.Add(1)
diff --git a/src/testing/testing.go b/src/testing/testing.go
index bf83df8863..0055a4cfe2 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -325,7 +325,6 @@ var (
cpuListStr *string
parallel *int
testlog *string
- printer *testPrinter
haveExamples bool // are there examples?
@@ -335,55 +334,45 @@ var (
numFailed uint32 // number of test failures
)
-type testPrinter struct {
- chatty bool
-
+type chattyPrinter struct {
+ w io.Writer
lastNameMu sync.Mutex // guards lastName
lastName string // last printed test name in chatty mode
}
-func newTestPrinter(chatty bool) *testPrinter {
- return &testPrinter{
- chatty: chatty,
- }
+func newChattyPrinter(w io.Writer) *chattyPrinter {
+ return &chattyPrinter{w: w}
}
-func (p *testPrinter) Print(testName, out string) {
- p.Fprint(os.Stdout, testName, out)
+// Updatef prints a message about the status of the named test to w.
+//
+// The formatted message must include the test name itself.
+func (p *chattyPrinter) Updatef(testName, format string, args ...interface{}) {
+ p.lastNameMu.Lock()
+ defer p.lastNameMu.Unlock()
+
+ // Since the message already implies an association with a specific new test,
+ // we don't need to check what the old test name was or log an extra CONT line
+ // for it. (We're updating it anyway, and the current message already includes
+ // the test name.)
+ p.lastName = testName
+ fmt.Fprintf(p.w, format, args...)
}
-func (p *testPrinter) Fprint(w io.Writer, testName, out string) {
+// Printf prints a message, generated by the named test, that does not
+// necessarily mention that test's name itself.
+func (p *chattyPrinter) Printf(testName, format string, args ...interface{}) {
p.lastNameMu.Lock()
defer p.lastNameMu.Unlock()
- if !p.chatty ||
- strings.HasPrefix(out, "--- PASS: ") ||
- strings.HasPrefix(out, "--- FAIL: ") ||
- strings.HasPrefix(out, "--- SKIP: ") ||
- strings.HasPrefix(out, "=== RUN ") ||
- strings.HasPrefix(out, "=== CONT ") ||
- strings.HasPrefix(out, "=== PAUSE ") {
- // If we're buffering test output (!p.chatty), we don't really care which
- // test is emitting which line so long as they are serialized.
- //
- // If the message already implies an association with a specific new test,
- // we don't need to check what the old test name was or log an extra CONT
- // line for it. (We're updating it anyway, and the current message already
- // includes the test name.)
- p.lastName = testName
- fmt.Fprint(w, out)
- return
- }
-
if p.lastName == "" {
p.lastName = testName
} else if p.lastName != testName {
- // Always printed as-is, with 0 decoration or indentation. So, we skip
- // printing to w.
- fmt.Printf("=== CONT %s\n", testName)
+ fmt.Fprintf(p.w, "=== CONT %s\n", testName)
p.lastName = testName
}
- fmt.Fprint(w, out)
+
+ fmt.Fprintf(p.w, format, args...)
}
// The maximum number of stack frames to go through when skipping helper functions for
@@ -405,12 +394,12 @@ type common struct {
cleanupName string // Name of the cleanup function.
cleanupPc []uintptr // The stack trace at the point where Cleanup was called.
- chatty bool // A copy of the chatty flag.
- bench bool // Whether the current test is a benchmark.
- finished bool // Test function has completed.
- hasSub int32 // Written atomically.
- raceErrors int // Number of races detected during test.
- runner string // Function name of tRunner running the test.
+ chatty *chattyPrinter // A copy of chattyPrinter, if the chatty flag is set.
+ bench bool // Whether the current test is a benchmark.
+ finished bool // Test function has completed.
+ hasSub int32 // Written atomically.
+ raceErrors int // Number of races detected during test.
+ runner string // Function name of tRunner running the test.
parent *common
level int // Nesting depth of test or benchmark.
@@ -572,12 +561,31 @@ func (c *common) flushToParent(testName, format string, args ...interface{}) {
p.mu.Lock()
defer p.mu.Unlock()
- printer.Fprint(p.w, testName, fmt.Sprintf(format, args...))
-
c.mu.Lock()
defer c.mu.Unlock()
- io.Copy(p.w, bytes.NewReader(c.output))
- c.output = c.output[:0]
+
+ if len(c.output) > 0 {
+ format += "%s"
+ args = append(args[:len(args):len(args)], c.output)
+ c.output = c.output[:0] // but why?
+ }
+
+ if c.chatty != nil && p.w == c.chatty.w {
+ // We're flushing to the actual output, so track that this output is
+ // associated with a specific test (and, specifically, that the next output
+ // is *not* associated with that test).
+ //
+ // Moreover, if c.output is non-empty it is important that this write be
+ // atomic with respect to the output of other tests, so that we don't end up
+ // with confusing '=== CONT' lines in the middle of our '--- PASS' block.
+ // Neither humans nor cmd/test2json can parse those easily.
+ // (See https://golang.org/issue/40771.)
+ c.chatty.Updatef(testName, format, args...)
+ } else {
+ // We're flushing to the output buffer of the parent test, which will
+ // itself follow a test-name header when it is finally flushed to stdout.
+ fmt.Fprintf(p.w, format, args...)
+ }
}
type indenter struct {
@@ -746,13 +754,13 @@ func (c *common) logDepth(s string, depth int) {
}
panic("Log in goroutine after " + c.name + " has completed")
} else {
- if c.chatty {
+ if c.chatty != nil {
if c.bench {
// Benchmarks don't print === CONT, so we should skip the test
// printer and just print straight to stdout.
fmt.Print(c.decorate(s, depth+1))
} else {
- printer.Print(c.name, c.decorate(s, depth+1))
+ c.chatty.Printf(c.name, "%s", c.decorate(s, depth+1))
}
return
@@ -983,34 +991,22 @@ func (t *T) Parallel() {
t.parent.sub = append(t.parent.sub, t)
t.raceErrors += race.Errors()
- if t.chatty {
- // Print directly to root's io.Writer so there is no delay.
- root := t.parent
- for ; root.parent != nil; root = root.parent {
- }
- root.mu.Lock()
+ if t.chatty != nil {
// Unfortunately, even though PAUSE indicates that the named test is *no
// longer* running, cmd/test2json interprets it as changing the active test
// for the purpose of log parsing. We could fix cmd/test2json, but that
// won't fix existing deployments of third-party tools that already shell
// out to older builds of cmd/test2json — so merely fixing cmd/test2json
// isn't enough for now.
- printer.Fprint(root.w, t.name, fmt.Sprintf("=== PAUSE %s\n", t.name))
- root.mu.Unlock()
+ t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name)
}
t.signal <- true // Release calling test.
<-t.parent.barrier // Wait for the parent test to complete.
t.context.waitParallel()
- if t.chatty {
- // Print directly to root's io.Writer so there is no delay.
- root := t.parent
- for ; root.parent != nil; root = root.parent {
- }
- root.mu.Lock()
- printer.Fprint(root.w, t.name, fmt.Sprintf("=== CONT %s\n", t.name))
- root.mu.Unlock()
+ if t.chatty != nil {
+ t.chatty.Updatef(t.name, "=== CONT %s\n", t.name)
}
t.start = time.Now()
@@ -1161,14 +1157,8 @@ func (t *T) Run(name string, f func(t *T)) bool {
}
t.w = indenter{&t.common}
- if t.chatty {
- // Print directly to root's io.Writer so there is no delay.
- root := t.parent
- for ; root.parent != nil; root = root.parent {
- }
- root.mu.Lock()
- printer.Fprint(root.w, t.name, fmt.Sprintf("=== RUN %s\n", t.name))
- root.mu.Unlock()
+ if t.chatty != nil {
+ t.chatty.Updatef(t.name, "=== RUN %s\n", t.name)
}
// Instead of reducing the running count of this test before calling the
// tRunner and increasing it afterwards, we rely on tRunner keeping the
@@ -1333,8 +1323,6 @@ func (m *M) Run() (code int) {
flag.Parse()
}
- printer = newTestPrinter(Verbose())
-
if *parallel < 1 {
fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer")
flag.Usage()
@@ -1379,7 +1367,7 @@ func (t *T) report() {
format := "--- %s: %s (%s)\n"
if t.Failed() {
t.flushToParent(t.name, format, "FAIL", t.name, dstr)
- } else if t.chatty {
+ } else if t.chatty != nil {
if t.Skipped() {
t.flushToParent(t.name, format, "SKIP", t.name, dstr)
} else {
@@ -1440,10 +1428,12 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT
signal: make(chan bool),
barrier: make(chan bool),
w: os.Stdout,
- chatty: *chatty,
},
context: ctx,
}
+ if Verbose() {
+ t.chatty = newChattyPrinter(t.w)
+ }
tRunner(t, func(t *T) {
for _, test := range tests {
t.Run(test.Name, test.F)
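
Across benchmark.go, sub_test.go, and this file the refactor is the same: the global printer plus a boolean chatty flag collapse into one nilable *chattyPrinter, so chatty != nil is the verbosity test and the printer carries its own writer and lastName state. A standalone sketch of that nil-as-off pattern, with illustrative names:

package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

// printer is nil unless verbose mode is on; the nil check replaces
// a separate bool, as with common.chatty above.
type printer struct {
	w    io.Writer
	mu   sync.Mutex
	last string // last test name printed
}

func (p *printer) printf(name, format string, args ...interface{}) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.last == "" {
		p.last = name
	} else if p.last != name {
		fmt.Fprintf(p.w, "=== CONT  %s\n", name)
		p.last = name
	}
	fmt.Fprintf(p.w, format, args...)
}

func main() {
	verbose := true // stand-in for testing.Verbose()
	var p *printer
	if verbose {
		p = &printer{w: os.Stdout}
	}
	if p != nil { // presence of the printer implies chatty mode
		p.printf("TestA", "hello from %s\n", "TestA")
		p.printf("TestB", "hello from %s\n", "TestB")
	}
}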
diff --git a/test/fixedbugs/issue40629.go b/test/fixedbugs/issue40629.go
new file mode 100644
index 0000000000..c6ef408f49
--- /dev/null
+++ b/test/fixedbugs/issue40629.go
@@ -0,0 +1,69 @@
+// run
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+const N = 40
+
+func main() {
+ var x [N]int // stack-allocated memory
+ for i := range x {
+ x[i] = 0x999
+ }
+
+ // This defer checks to see if x is uncorrupted.
+ defer func(p *[N]int) {
+ recover()
+ for i := range p {
+ if p[i] != 0x999 {
+ for j := range p {
+ fmt.Printf("p[%d]=0x%x\n", j, p[j])
+ }
+ panic("corrupted stack variable")
+ }
+ }
+ }(&x)
+
+ // This defer starts a new goroutine, which will (hopefully)
+ // overwrite x on the garbage stack.
+ defer func() {
+ c := make(chan bool)
+ go func() {
+ useStack(1000)
+ c <- true
+ }()
+ <-c
+
+ }()
+
+ // This defer causes a stack copy.
+ // The old stack is now garbage.
+ defer func() {
+ useStack(1000)
+ }()
+
+ // Trigger a segfault.
+ *g = 0
+
+ // Make the return statement unreachable.
+ // That makes the stack map at the deferreturn call empty.
+ // In particular, the argument to the first defer is not
+ // marked as a pointer, so it doesn't get adjusted
+ // during the stack copy.
+ for {
+ }
+}
+
+var g *int64
+
+func useStack(n int) {
+ if n == 0 {
+ return
+ }
+ useStack(n - 1)
+}
diff --git a/test/fixedbugs/issue40954.go b/test/fixedbugs/issue40954.go
new file mode 100644
index 0000000000..53e9ccf387
--- /dev/null
+++ b/test/fixedbugs/issue40954.go
@@ -0,0 +1,35 @@
+// run
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "unsafe"
+)
+
+//go:notinheap
+type S struct{ x int }
+
+func main() {
+ var i int
+ p := (*S)(unsafe.Pointer(uintptr(unsafe.Pointer(&i))))
+ v := uintptr(unsafe.Pointer(p))
+ // p is a pointer to a go:notinheap type. Like some C libraries,
+ // we stored an integer in that pointer. That integer just happens
+ // to be the address of i.
+ // v is also the address of i.
+ // p has a base type which is marked go:notinheap, so it
+ // should not be adjusted when the stack is copied.
+ recurse(100, p, v)
+}
+func recurse(n int, p *S, v uintptr) {
+ if n > 0 {
+ recurse(n-1, p, v)
+ }
+ if uintptr(unsafe.Pointer(p)) != v {
+ panic("adjusted notinheap pointer")
+ }
+}
diff --git a/test/fixedbugs/issue41711.go b/test/fixedbugs/issue41711.go
new file mode 100644
index 0000000000..4e09440ba5
--- /dev/null
+++ b/test/fixedbugs/issue41711.go
@@ -0,0 +1,17 @@
+// compile -d=ssa/check/on
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func f() uint32 {
+ s := "food"
+ x := uint32(s[0]) + uint32(s[1])<<8 + uint32(s[2])<<16 + uint32(s[3])<<24
+ // x is a constant, but that's not known until lowering.
+ // shifting it by 8 moves the high byte up into the high 32 bits of
+ // a 64-bit word. That word is not properly sign-extended by the faulty
+ // rule, which causes the compiler to fail.
+ return x << 8
+}
diff --git a/test/notinheap.go b/test/notinheap.go
index 16c3f8faf0..2188a38a14 100644
--- a/test/notinheap.go
+++ b/test/notinheap.go
@@ -11,23 +11,11 @@ package p
//go:notinheap
type nih struct{}
-// Types embedding notinheap types must be notinheap.
+type embed4 map[nih]int // ERROR "incomplete \(or unallocatable\) map key not allowed"
-type embed1 struct { // ERROR "must be go:notinheap"
- x nih
-}
-
-type embed2 [1]nih // ERROR "must be go:notinheap"
-
-type embed3 struct { // ERROR "must be go:notinheap"
- x [1]nih
-}
-
-type embed4 map[nih]int // ERROR "go:notinheap map key not allowed"
-
-type embed5 map[int]nih // ERROR "go:notinheap map value not allowed"
+type embed5 map[int]nih // ERROR "incomplete \(or unallocatable\) map value not allowed"
-type emebd6 chan nih // ERROR "chan of go:notinheap type not allowed"
+type emebd6 chan nih // ERROR "chan of incomplete \(or unallocatable\) type not allowed"
type okay1 *nih
@@ -56,8 +44,8 @@ var sink interface{}
func i() {
sink = new(t1) // no error
- sink = (*t2)(new(t1)) // ERROR "cannot convert(.|\n)*t2 is go:notinheap"
- sink = (*t2)(new(struct{ x int })) // ERROR "cannot convert(.|\n)*t2 is go:notinheap"
- sink = []t3("foo") // ERROR "cannot convert(.|\n)*t3 is go:notinheap"
- sink = []t4("bar") // ERROR "cannot convert(.|\n)*t4 is go:notinheap"
+ sink = (*t2)(new(t1)) // ERROR "cannot convert(.|\n)*t2 is incomplete \(or unallocatable\)"
+ sink = (*t2)(new(struct{ x int })) // ERROR "cannot convert(.|\n)*t2 is incomplete \(or unallocatable\)"
+ sink = []t3("foo") // ERROR "cannot convert(.|\n)*t3 is incomplete \(or unallocatable\)"
+ sink = []t4("bar") // ERROR "cannot convert(.|\n)*t4 is incomplete \(or unallocatable\)"
}
diff --git a/test/notinheap2.go b/test/notinheap2.go
index 944f2993ab..100ed37b72 100644
--- a/test/notinheap2.go
+++ b/test/notinheap2.go
@@ -13,24 +13,62 @@ type nih struct {
next *nih
}
-// Globals and stack variables are okay.
+// Global variables are okay.
var x nih
+// Stack variables are not okay.
+
func f() {
- var y nih
+ var y nih // ERROR "nih is incomplete \(or unallocatable\); stack allocation disallowed"
x = y
}
// Heap allocation is not okay.
var y *nih
+var y2 *struct{ x nih }
+var y3 *[1]nih
var z []nih
+var w []nih
+var n int
+var sink interface{}
+
+type embed1 struct { // implicitly notinheap
+ x nih
+}
+
+type embed2 [1]nih // implicitly notinheap
+
+type embed3 struct { // implicitly notinheap
+ x [1]nih
+}
+
+// Type aliases inherit the go:notinheap-ness of the type they alias.
+type nihAlias = nih
+
+type embedAlias1 struct { // implicitly notinheap
+ x nihAlias
+}
+type embedAlias2 [1]nihAlias // implicitly notinheap
func g() {
- y = new(nih) // ERROR "heap allocation disallowed"
- z = make([]nih, 1) // ERROR "heap allocation disallowed"
- z = append(z, x) // ERROR "heap allocation disallowed"
+ y = new(nih) // ERROR "can't be allocated in Go"
+ y2 = new(struct{ x nih }) // ERROR "can't be allocated in Go"
+ y3 = new([1]nih) // ERROR "can't be allocated in Go"
+ z = make([]nih, 1) // ERROR "can't be allocated in Go"
+ z = append(z, x) // ERROR "can't be allocated in Go"
+
+ sink = new(embed1) // ERROR "can't be allocated in Go"
+ sink = new(embed2) // ERROR "can't be allocated in Go"
+ sink = new(embed3) // ERROR "can't be allocated in Go"
+ sink = new(embedAlias1) // ERROR "can't be allocated in Go"
+ sink = new(embedAlias2) // ERROR "can't be allocated in Go"
+
+ // Test for special case of OMAKESLICECOPY
+ x := make([]nih, n) // ERROR "can't be allocated in Go"
+ copy(x, z)
+ z = x
}
// Writes don't produce write barriers.
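
The reworded diagnostics treat go:notinheap types like C's incomplete types: declarable and addressable, never allocated by Go. A sketch of what the rules above still allow, assuming a build where the //go:notinheap pragma is honored as in these tests:

package p

import "unsafe"

//go:notinheap
type nih struct{ next *nih }

var global nih // globals remain allowed
var head *nih  // pointers remain allowed

// fromOutside models receiving such memory from outside the Go heap
// (C-allocated, mmap'd, etc.); the conversion is allowed because Go
// itself never allocates the pointee (cf. issue40954.go above).
func fromOutside(p unsafe.Pointer) *nih {
	return (*nih)(p)
}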