author     Keith Randall <khr@golang.org>  2022-08-22 10:26:50 -0700
committer  Heschi Kreinick <heschi@google.com>  2022-08-29 19:12:46 +0000
commit     66197f01e1df1da7ddab91055e05b5d1c04d55e2 (patch)
tree       9499078561858c59fc718562f272386164f7abba
parent     569d949eeab8b16ae2548a3132ca6f1b8657262b (diff)
download   go-66197f01e1df1da7ddab91055e05b5d1c04d55e2.tar.gz
           go-66197f01e1df1da7ddab91055e05b5d1c04d55e2.zip
[release-branch.go1.18] cmd/compile: handle partially overlapping assignments
Normally, when moving Go values of type T from one location to another,
we don't need to worry about partial overlaps. The two Ts must either be
in disjoint (nonoverlapping) memory or in exactly the same location.
There are 2 cases where this isn't true:

1) Using unsafe you can arrange partial overlaps.
2) Since Go 1.17, you can use a cast from a slice to a ptr-to-array.
   https://go.dev/ref/spec#Conversions_from_slice_to_array_pointer
   This feature can be used to construct partial overlaps of array types.

       var a [3]int
       p := (*[2]int)(a[:])
       q := (*[2]int)(a[1:])
       *p = *q

We don't care about solving 1. Or at least, we haven't historically and
no one has complained.

For 2, we need to ensure that if there might be partial overlap, then we
can't use OpMove; we must use memmove instead. (memmove handles partial
overlap by copying in the correct direction. OpMove does not.)

Note that we have to be careful here not to introduce a call when we're
marshaling arguments to a call or unmarshaling results from a call.

Fixes #54603

Change-Id: I1ca6aba8041576849c1d85f1fa33ae61b80a373d
Reviewed-on: https://go-review.googlesource.com/c/go/+/425076
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Keith Randall <khr@golang.org>
(cherry picked from commit 332a5981d0ae3f21f668f94755f43ecd8ee9a9eb)
Reviewed-on: https://go-review.googlesource.com/c/go/+/425198
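As an illustration (not part of the commit), the snippet above expands into a
complete program. This variant assigns in the opposite direction, *q = *p,
because that is the direction where a naive low-to-high element copy visibly
corrupts the result; the concrete values are invented for the demo:

    package main

    import "fmt"

    func main() {
        a := [3]int{1, 2, 3}
        p := (*[2]int)(a[:])  // aliases a[0] and a[1] (needs Go 1.17+)
        q := (*[2]int)(a[1:]) // aliases a[1] and a[2]

        // *p and *q overlap at a[1]. The assignment must behave as if all
        // of *p were loaded before anything is stored into *q, so the
        // correct result is a = [1 1 2]. A naive low-to-high copy would
        // store a[1] = a[0] first and then reload the clobbered a[1],
        // producing [1 1 1]. memmove avoids this by picking the copy
        // direction based on how the regions overlap.
        *q = *p
        fmt.Println(a) // [1 1 2]
    }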
-rw-r--r--  src/cmd/compile/internal/ir/symtab.go           1
-rw-r--r--  src/cmd/compile/internal/ssa/gen/genericOps.go  4
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite.go         14
-rw-r--r--  src/cmd/compile/internal/ssagen/ssa.go          79
-rw-r--r--  test/fixedbugs/issue54467.go                    26
-rw-r--r--  test/nilptr5.go                                 8
6 files changed, 124 insertions(+), 8 deletions(-)
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index b204a1d544..148edb2c88 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -26,6 +26,7 @@ var Syms struct {
GCWriteBarrier *obj.LSym
Goschedguarded *obj.LSym
Growslice *obj.LSym
+ Memmove *obj.LSym
Msanread *obj.LSym
Msanwrite *obj.LSym
Msanmove *obj.LSym
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 4f133b1ff6..69cd828c09 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -355,7 +355,9 @@ var genericOps = []opData{
{name: "Load", argLength: 2}, // Load from arg0. arg1=memory
{name: "Dereference", argLength: 2}, // Load from arg0. arg1=memory. Helper op for arg/result passing, result is an otherwise not-SSA-able "value".
{name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
- // The source and destination of Move may overlap in some cases. See e.g.
+ // Normally we require that the source and destination of Move do not overlap.
+ // There is an exception when we know all the loads will happen before all
+ // the stores. In that case, overlap is ok. See
// memmove inlining in generic.rules. When isInlinableMemmove (in ../rewrite.go)
// returns true, we must do all loads before all stores when lowering Move.
// The type of Move is used for the write barrier pass to insert write barriers
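To make the comment's distinction concrete, here is a minimal sketch
(hypothetical helpers, not compiler code) contrasting a lowering that performs
all loads before all stores with one that interleaves them:

    package main

    import "fmt"

    // moveLoadsFirst models a lowered Move that does all loads before all
    // stores: the source is read in full (into tmp) before the destination
    // is written, so overlap between dst and src is harmless.
    func moveLoadsFirst(dst, src []byte) {
        tmp := make([]byte, len(src))
        copy(tmp, src) // all loads...
        copy(dst, tmp) // ...then all stores
    }

    // moveInterleaved models a lowering that alternates loads and stores
    // from low to high addresses; this is only safe when dst and src are
    // disjoint (or dst lies below src).
    func moveInterleaved(dst, src []byte) {
        for i := range src {
            dst[i] = src[i]
        }
    }

    func main() {
        buf := []byte("abcdef")
        moveLoadsFirst(buf[1:4], buf[0:3])
        fmt.Println(string(buf)) // aabcef

        buf = []byte("abcdef")
        moveInterleaved(buf[1:4], buf[0:3])
        fmt.Println(string(buf)) // aaaaef: the overlap corrupted the copy
    }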
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index eb8fa0c02a..70cd4c54cb 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -1358,7 +1358,8 @@ func zeroUpper56Bits(x *Value, depth int) bool {
// isInlinableMemmove reports whether the given arch performs a Move of the given size
// faster than memmove. It will only return true if replacing the memmove with a Move is
-// safe, either because Move is small or because the arguments are disjoint.
+// safe, either because Move will do all of its loads before any of its stores, or
+// because the arguments are known to be disjoint.
// This is used as a check for replacing memmove with Move ops.
func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
// It is always safe to convert memmove into Move when its arguments are disjoint.
@@ -1377,6 +1378,9 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
}
return false
}
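+// IsInlinableMemmove is an exported wrapper around isInlinableMemmove,
+// for callers outside the ssa package.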
+func IsInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
+ return isInlinableMemmove(dst, src, sz, c)
+}
// logLargeCopy logs the occurrence of a large copy.
// The best place to do this is in the rewrite rules where the size of the move is easy to find.
@@ -1390,6 +1394,14 @@ func logLargeCopy(v *Value, s int64) bool {
}
return true
}
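+// LogLargeCopy is like logLargeCopy, but takes a function name and position
+// directly instead of an *ssa.Value, for callers outside this package.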
+func LogLargeCopy(funcName string, pos src.XPos, s int64) {
+ if s < 128 {
+ return
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(pos, "copy", "lower", funcName, fmt.Sprintf("%d bytes", s))
+ }
+}
// hasSmallRotate reports whether the architecture has rotate instructions
// for sizes < 32-bit. This is used to decide whether to promote some rotations.
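The safety condition named in the doc comment above, arguments known to be
disjoint, amounts to a range-intersection question. Below is a hypothetical
run-time version of that check; the compiler answers it symbolically on SSA
values rather than on concrete addresses:

    package main

    import "fmt"

    // overlaps reports whether the byte ranges [d, d+n) and [s, s+n)
    // intersect. Disjointness is what makes an interleaved Move safe.
    func overlaps(d, s, n int64) bool {
        return d < s+n && s < d+n
    }

    func main() {
        fmt.Println(overlaps(0, 8, 8))  // false: adjacent regions are disjoint
        fmt.Println(overlaps(0, 4, 8))  // true: partial overlap
        fmt.Println(overlaps(16, 0, 8)) // false: well separated
    }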
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 364e0c8197..b19f3c812b 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -105,6 +105,7 @@ func InitConfig() {
ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
+ ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
@@ -1387,7 +1388,47 @@ func (s *state) zero(t *types.Type, dst *ssa.Value) {
}
func (s *state) move(t *types.Type, dst, src *ssa.Value) {
+ s.moveWhichMayOverlap(t, dst, src, false)
+}
+func (s *state) moveWhichMayOverlap(t *types.Type, dst, src *ssa.Value, mayOverlap bool) {
s.instrumentMove(t, dst, src)
+ if mayOverlap && t.IsArray() && t.NumElem() > 1 && !ssa.IsInlinableMemmove(dst, src, t.Size(), s.f.Config) {
+ // Normally, when moving Go values of type T from one location to another,
+ // we don't need to worry about partial overlaps. The two Ts must either be
+ // in disjoint (nonoverlapping) memory or in exactly the same location.
+ // There are 2 cases where this isn't true:
+ // 1) Using unsafe you can arrange partial overlaps.
+ // 2) Since Go 1.17, you can use a cast from a slice to a ptr-to-array.
+ // https://go.dev/ref/spec#Conversions_from_slice_to_array_pointer
+ // This feature can be used to construct partial overlaps of array types.
+ // var a [3]int
+ // p := (*[2]int)(a[:])
+ // q := (*[2]int)(a[1:])
+ // *p = *q
+ // We don't care about solving 1. Or at least, we haven't historically
+ // and no one has complained.
+ // For 2, we need to ensure that if there might be partial overlap,
+ // then we can't use OpMove; we must use memmove instead.
+ // (memmove handles partial overlap by copying in the correct
+ // direction. OpMove does not.)
+ //
+ // Note that we have to be careful here not to introduce a call when
+ // we're marshaling arguments to a call or unmarshaling results from a call.
+ // Cases where this is happening must pass mayOverlap = false.
+ // (Currently this only happens when unmarshaling results of a call.)
+ if t.HasPointers() {
+ s.rtcall(ir.Syms.Typedmemmove, true, nil, s.reflectType(t), dst, src)
+ // We would have otherwise implemented this move with straightline code,
+ // including a write barrier. Pretend we issue a write barrier here,
+ // so that the write barrier tests work. (Otherwise they'd need to know
+ // the details of IsInlinableMemmove.)
+ s.curfn.SetWBPos(s.peekPos())
+ } else {
+ s.rtcall(ir.Syms.Memmove, true, nil, dst, src, s.constInt(types.Types[types.TUINTPTR], t.Size()))
+ }
+ ssa.LogLargeCopy(s.f.Name, s.peekPos(), t.Size())
+ return
+ }
store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
store.Aux = t
s.vars[memVar] = store
@@ -1565,6 +1606,36 @@ func (s *state) stmt(n ir.Node) {
return
}
+ // mayOverlap keeps track of whether the LHS and RHS might
+ // refer to overlapping memory.
+ mayOverlap := true
+ if n.Y == nil {
+ // Not a move at all, mayOverlap is not relevant.
+ } else if n.Def {
+ // A variable being defined cannot overlap anything else.
+ mayOverlap = false
+ } else if n.X.Op() == ir.ONAME && n.Y.Op() == ir.ONAME {
+ // Two named things never overlap.
+ // (Or they are identical, which we treat as nonoverlapping.)
+ mayOverlap = false
+ } else if n.Y.Op() == ir.ODEREF {
+ p := n.Y.(*ir.StarExpr).X
+ for p.Op() == ir.OCONVNOP {
+ p = p.(*ir.ConvExpr).X
+ }
+ if p.Op() == ir.OSPTR && p.(*ir.UnaryExpr).X.Type().IsString() {
+ // Pointer fields of strings point to unmodifiable memory.
+ // That memory can't overlap with the memory being written.
+ mayOverlap = false
+ }
+ } else if n.Y.Op() == ir.ORESULT || n.Y.Op() == ir.OCALLFUNC || n.Y.Op() == ir.OCALLINTER {
+ // When copying values out of the return area of a call, we know
+ // the source and destination don't overlap. Importantly, we must
+ // set mayOverlap to false so that we don't introduce a call to
+ // memmove while we still have live data in the argument area.
+ mayOverlap = false
+ }
+
// Evaluate RHS.
rhs := n.Y
if rhs != nil {
@@ -1665,7 +1736,7 @@ func (s *state) stmt(n ir.Node) {
}
}
- s.assign(n.X, r, deref, skip)
+ s.assignWhichMayOverlap(n.X, r, deref, skip, mayOverlap)
case ir.OIF:
n := n.(*ir.IfStmt)
@@ -3503,7 +3574,11 @@ const (
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
+// mayOverlap indicates whether left&right might partially overlap in memory. Default is false.
func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+ s.assignWhichMayOverlap(left, right, deref, skip, false)
+}
+func (s *state) assignWhichMayOverlap(left ir.Node, right *ssa.Value, deref bool, skip skipMask, mayOverlap bool) {
if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
@@ -3604,7 +3679,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask
if right == nil {
s.zero(t, addr)
} else {
- s.move(t, addr, right)
+ s.moveWhichMayOverlap(t, addr, right, mayOverlap)
}
return
}
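The mayOverlap classification in the stmt hunk above can be exercised from
user code. In the sketch below (values invented for the demo), only the final
deref-to-deref assignment can partially overlap, so it is the one that must
be compiled with memmove semantics (a memmove call, or a Move known to load
before storing):

    package main

    import "fmt"

    func main() {
        a := [4]int{0, 1, 2, 3}

        // Named-to-named: two distinct variables never partially overlap,
        // so plain OpMove remains fine here.
        var b [4]int
        b = a
        _ = b

        // Deref-to-deref through slice-to-array-pointer conversions is the
        // one way (short of unsafe) to build partially overlapping arrays,
        // so this assignment needs memmove-style copying.
        p := (*[3]int)(a[0:3]) // aliases a[0..2]
        q := (*[3]int)(a[1:4]) // aliases a[1..3]
        *q = *p
        fmt.Println(a) // [0 0 1 2]
    }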
diff --git a/test/fixedbugs/issue54467.go b/test/fixedbugs/issue54467.go
new file mode 100644
index 0000000000..42e221c954
--- /dev/null
+++ b/test/fixedbugs/issue54467.go
@@ -0,0 +1,26 @@
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+func main() {
+ var x [64]byte
+ for i := range x {
+ x[i] = byte(i)
+ }
+ y := x
+
+ copy(x[4:36], x[2:34])
+ *(*[32]byte)(y[4:36]) = *(*[32]byte)(y[2:34])
+
+ for i := range x {
+ if x[i] != y[i] {
+ fmt.Printf("x[%v] = %v; y[%v] = %v\n", i, x[i], i, y[i])
+ }
+ }
+}
diff --git a/test/nilptr5.go b/test/nilptr5.go
index 2c48c0b261..118746e4aa 100644
--- a/test/nilptr5.go
+++ b/test/nilptr5.go
@@ -1,7 +1,7 @@
// errorcheck -0 -d=nil
-// +build !wasm
-// +build !aix
+//go:build !wasm && !aix
+// +build !wasm,!aix
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -20,7 +20,7 @@ func f5(p *float32, q *float64, r *float32, s *float64) float64 {
return x + y
}
-type T [29]byte
+type T struct{ b [29]byte }
func f6(p, q *T) {
x := *p // ERROR "removed nil check"
@@ -28,6 +28,6 @@ func f6(p, q *T) {
}
// make sure to remove nil check for memory move (issue #18003)
-func f8(t *[8]int) [8]int {
+func f8(t *struct{ b [8]int }) struct{ b [8]int } {
return *t // ERROR "removed nil check"
}