From 27d5fccd2119099c70ab6a4418626beac7a97b4b Mon Sep 17 00:00:00 2001
From: Keith Randall
Date: Thu, 7 Jan 2021 14:57:53 -0800
Subject: [release-branch.go1.15] cmd/compile: don't short-circuit copies whose source is volatile

Current optimization: When we copy a->b and then b->c, we might as well
copy a->c instead of b->c (then b might be dead and go away).

*Except* if a is a volatile location (might be clobbered by a call). In
that case, we really do want to copy a immediately, because there might
be a call before we can do the a->c copy.

User calls can't happen in between, because the rule matches up the
memory states. But calls inserted for memory barriers, particularly
runtime.typedmemmove, can. (I guess we could introduce a
register-calling-convention version of runtime.typedmemmove, but that
seems a bigger change than this one.)

Fixes #43575

Change-Id: Ifa518bb1a6f3a8dd46c352d4fd54ea9713b3eb1a
Reviewed-on: https://go-review.googlesource.com/c/go/+/282492
Trust: Keith Randall
Trust: Josh Bleecher Snyder
Run-TryBot: Keith Randall
TryBot-Result: Go Bot
Reviewed-by: Josh Bleecher Snyder
(cherry picked from commit 304f769ffc68e64244266b3aadbf91e6738c0064)
Reviewed-on: https://go-review.googlesource.com/c/go/+/282558
Trust: Dmitri Shuralyov
---
 src/cmd/compile/internal/ssa/gen/generic.rules |  4 ++--
 src/cmd/compile/internal/ssa/rewritegeneric.go |  8 ++++----
 test/fixedbugs/issue43570.go                   | 40 ++++++++++++++++++++++++++
 3 files changed, 46 insertions(+), 6 deletions(-)
 create mode 100644 test/fixedbugs/issue43570.go

diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index ed5bfc81fd..8bbe913380 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -2404,7 +2404,7 @@
 (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
 	&& t1.Compare(t2) == types.CMPeq
 	&& isSamePtr(tmp1, tmp2)
-	&& isStackPtr(src)
+	&& isStackPtr(src) && !isVolatile(src)
 	&& disjoint(src, s, tmp2, s)
 	&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
 	=> (Move {t1} [s] dst src midmem)
@@ -2413,7 +2413,7 @@
 (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
 	&& t1.Compare(t2) == types.CMPeq
 	&& isSamePtr(tmp1, tmp2)
-	&& isStackPtr(src)
+	&& isStackPtr(src) && !isVolatile(src)
 	&& disjoint(src, s, tmp2, s)
 	&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
 	=> (Move {t1} [s] dst src midmem)
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 9f4e1b95bd..6a09616aad 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -13666,7 +13666,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
-	// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+	// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
 	// result: (Move {t1} [s] dst src midmem)
 	for {
 		s := auxIntToInt64(v.AuxInt)
@@ -13680,7 +13680,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
 		t2 := auxToType(midmem.Aux)
 		src := midmem.Args[1]
 		tmp2 := midmem.Args[0]
-		if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+		if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
 			break
 		}
 		v.reset(OpMove)
@@ -13690,7 +13690,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
-	// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+	// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
 	// result: (Move {t1} [s] dst src midmem)
 	for {
 		s := auxIntToInt64(v.AuxInt)
@@ -13708,7 +13708,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
 		t2 := auxToType(midmem_0.Aux)
 		src := midmem_0.Args[1]
 		tmp2 := midmem_0.Args[0]
-		if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+		if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
 			break
 		}
 		v.reset(OpMove)
diff --git a/test/fixedbugs/issue43570.go b/test/fixedbugs/issue43570.go
new file mode 100644
index 0000000000..d073fde5f6
--- /dev/null
+++ b/test/fixedbugs/issue43570.go
@@ -0,0 +1,40 @@
+// run
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+type T [8]*int
+
+//go:noinline
+func f(x int) T {
+	return T{}
+}
+
+//go:noinline
+func g(x int, t T) {
+	if t != (T{}) {
+		panic(fmt.Sprintf("bad: %v", t))
+	}
+}
+
+func main() {
+	const N = 10000
+	var q T
+	func() {
+		for i := 0; i < N; i++ {
+			q = f(0)
+			g(0, q)
+			sink = make([]byte, 1024)
+		}
+	}()
+	// Note that the closure is a trick to get the write to q to be a
+	// write to a pointer that is known to be non-nil and requires
+	// a write barrier.
+}
+
+var sink []byte
--
cgit v1.2.3-54-g00ecf
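
A rough sketch of the a->b->c pattern the commit message describes, in plain Go
(the identifiers below are illustrative only and do not come from the patch or
the compiler): a pointer-containing result is produced in a stack slot that a
later call may clobber (the volatile location "a"), copied to a local ("b"),
and then stored to a heap-visible variable ("c") through a write-barrier call
such as runtime.typedmemmove. The added !isVolatile(src) condition keeps the
rewrite rule from folding the b->c copy into an a->c copy in situations like
this, where the barrier call sits between the two copies.

	package main

	// Sketch of the copy chain, assuming a pointer-containing type so the
	// final store needs a write barrier. Names (big, produce, global) are
	// hypothetical.
	type big [8]*int

	//go:noinline
	func produce() big { return big{} } // result slot is the volatile "a"

	var global big // heap-visible destination "c"

	func main() {
		tmp := produce() // a -> b: copy out of the volatile result slot
		global = tmp     // b -> c: done via a write-barrier call
		// Short-circuiting b -> c into a -> c is only safe when "a" cannot
		// be clobbered by that call, which is what !isVolatile(src) checks.
	}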