author    Keith Randall <khr@golang.org>    2017-05-24 11:16:42 -0700
committer Keith Randall <khr@golang.org>    2017-05-24 21:36:06 +0000
commit    67a782b8cc5b073041a7b5ab14144c922737d44f (patch)
tree      8503c62f6ae716e64a23f6de66f43b56a9ebde6a /src/cmd/compile/internal/ssa/regalloc_test.go
parent    e1e2ca11c65afac598c2173d1f05194a02883826 (diff)
cmd/compile: test for moving spills
Test that we really do move spills down to the dominator of all the uses.

Also add a test where go1.8 would have moved the spill out of the loop into two exit points, but go1.9 doesn't move the spill. This is a case where the 1.9 spill-moving code does not subsume the 1.8 spill-moving code. Maybe we should resurrect the moving-spills-out-of-loops CL to fix this one. (I suspect it wouldn't be worth the effort, but would be happy to hear evidence otherwise.)

Update #20472

Change-Id: I7dbf8d65e7f4d675d14e5ecf502887cebda35d2a
Reviewed-on: https://go-review.googlesource.com/44038
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
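In case it helps to see the rule the message describes, here is a minimal sketch of "move the spill down to the dominator of all the uses": find the deepest block that dominates every block containing a restore. This is an illustration only, not the compiler's actual spill-moving code (which also weighs loop depth and other constraints); domBlock, idom, and depth are hypothetical stand-ins for the SSA package's dominator-tree data.

// domBlock is a hypothetical stand-in for an SSA block plus its
// position in the dominator tree.
type domBlock struct {
	idom  *domBlock // immediate dominator; nil for the entry block
	depth int       // depth in the dominator tree (entry == 0)
}

// lowestCommonDominator returns the deepest block that dominates every
// block in uses: in this sketch, the candidate home for a sunken spill.
func lowestCommonDominator(uses []*domBlock) *domBlock {
	if len(uses) == 0 {
		return nil
	}
	d := uses[0]
	for _, u := range uses[1:] {
		a, b := d, u
		for a != b {
			// Walk the deeper of the two up its idom chain until they meet.
			if a.depth >= b.depth {
				a = a.idom
			} else {
				b = b.idom
			}
		}
		d = a
	}
	return d
}

Applied to the tests below: in TestSpillMove1 only exit2 restores y after a call, so the spill lands there; in TestSpillMove2 both exits restore after a call, and the deepest block dominating both is loop1, so the spill stays inside the loop.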
Diffstat (limited to 'src/cmd/compile/internal/ssa/regalloc_test.go')
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc_test.go | 109 +++++++++++++++
1 file changed, 109 insertions(+), 0 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index 1e4dea8b27..02751a9349 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -70,3 +70,112 @@ func TestSpillWithLoop(t *testing.T) {
}
}
}
+
+func TestSpillMove1(t *testing.T) {
+	c := testConfig(t)
+	f := c.Fun("entry",
+		Bloc("entry",
+			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+			Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+			Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+			Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
+			Goto("loop1"),
+		),
+		Bloc("loop1",
+			Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
+			Eq("a", "loop2", "exit1"),
+		),
+		Bloc("loop2",
+			Eq("a", "loop1", "exit2"),
+		),
+		Bloc("exit1",
+			// store before call, y is available in a register
+			Valu("mem2", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem"),
+			Valu("mem3", OpAMD64CALLstatic, types.TypeMem, 0, nil, "mem2"),
+			Exit("mem3"),
+		),
+		Bloc("exit2",
+			// store after call, y must be loaded from a spill location
+			Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, nil, "mem"),
+			Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
+			Exit("mem5"),
+		),
+	)
+	flagalloc(f.f)
+	regalloc(f.f)
+	checkFunc(f.f)
+	// Spill should be moved to exit2.
+	if numSpills(f.blocks["loop1"]) != 0 {
+		t.Errorf("spill present in loop1")
+	}
+	if numSpills(f.blocks["loop2"]) != 0 {
+		t.Errorf("spill present in loop2")
+	}
+	if numSpills(f.blocks["exit1"]) != 0 {
+		t.Errorf("spill present in exit1")
+	}
+	if numSpills(f.blocks["exit2"]) != 1 {
+		t.Errorf("spill missing in exit2")
+	}
+
+}
+
+func TestSpillMove2(t *testing.T) {
+	c := testConfig(t)
+	f := c.Fun("entry",
+		Bloc("entry",
+			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+			Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+			Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+			Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
+			Goto("loop1"),
+		),
+		Bloc("loop1",
+			Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
+			Eq("a", "loop2", "exit1"),
+		),
+		Bloc("loop2",
+			Eq("a", "loop1", "exit2"),
+		),
+		Bloc("exit1",
+			// store after call, y must be loaded from a spill location
+			Valu("mem2", OpAMD64CALLstatic, types.TypeMem, 0, nil, "mem"),
+			Valu("mem3", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem2"),
+			Exit("mem3"),
+		),
+		Bloc("exit2",
+			// store after call, y must be loaded from a spill location
+			Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, nil, "mem"),
+			Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
+			Exit("mem5"),
+		),
+	)
+	flagalloc(f.f)
+	regalloc(f.f)
+	checkFunc(f.f)
+	// There should be a spill in loop1, and nowhere else.
+	// TODO: resurrect moving spills out of loops? We could put spills at the start of both exit1 and exit2.
+	if numSpills(f.blocks["loop1"]) != 1 {
+		t.Errorf("spill missing from loop1")
+	}
+	if numSpills(f.blocks["loop2"]) != 0 {
+		t.Errorf("spill present in loop2")
+	}
+	if numSpills(f.blocks["exit1"]) != 0 {
+		t.Errorf("spill present in exit1")
+	}
+	if numSpills(f.blocks["exit2"]) != 0 {
+		t.Errorf("spill present in exit2")
+	}
+
+}
+
+func numSpills(b *Block) int {
+	n := 0
+	for _, v := range b.Values {
+		if v.Op == OpStoreReg {
+			n++
+		}
+	}
+	return n
+}