Diffstat (limited to 'src/cmd/compile/internal/ssa')
-rw-r--r--  src/cmd/compile/internal/ssa/check.go          |  33
-rw-r--r--  src/cmd/compile/internal/ssa/cse.go            |  10
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go    |  16
-rw-r--r--  src/cmd/compile/internal/ssa/gen/S390X.rules   |   4
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc.go       |  18
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc_test.go  |  65
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteS390X.go   |   8
-rw-r--r--  src/cmd/compile/internal/ssa/schedule.go       |  31
8 files changed, 159 insertions(+), 26 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index d78e915091..d6d39aee76 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -294,6 +294,39 @@ func checkFunc(f *Func) {
			}
		}
	}
+
+	// Check that if a tuple has a memory type, it is second.
+	for _, b := range f.Blocks {
+		for _, v := range b.Values {
+			if v.Type.IsTuple() && v.Type.FieldType(0).IsMemory() {
+				f.Fatalf("memory is first in a tuple: %s\n", v.LongString())
+			}
+		}
+	}
+
+	// Check that only one memory is live at any point.
+	// TODO: make this check work across blocks (interblock).
+	if f.scheduled {
+		for _, b := range f.Blocks {
+			var mem *Value // the live memory
+			for _, v := range b.Values {
+				if v.Op != OpPhi {
+					for _, a := range v.Args {
+						if a.Type.IsMemory() || a.Type.IsTuple() && a.Type.FieldType(1).IsMemory() {
+							if mem == nil {
+								mem = a
+							} else if mem != a {
+								f.Fatalf("two live mems @ %s: %s and %s", v, mem, a)
+							}
+						}
+					}
+				}
+				if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
+					mem = v
+				}
+			}
+		}
+	}
}
// domCheck reports whether x dominates y (including x==y).
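
Both checks added here lean on the convention that a tuple-producing op (an atomic load, a combined div/mod, and the like) carries its memory result, if any, in the tuple's second field. Below is a minimal standalone sketch of the single-live-memory walk, with stand-in types rather than the real ssa.Value, showing how threading each memory through the previous one catches a fork in the store chain:

package main

import "fmt"

// value is a stand-in for ssa.Value within one basic block.
type value struct {
	name string
	mem  bool // value is (or carries, as tuple field 1) memory
	args []*value
}

// checkOneLiveMem mirrors the walk above: every memory-consuming
// value must use the currently live memory, and every
// memory-producing value becomes the new live memory.
func checkOneLiveMem(values []*value) error {
	var live *value // the live memory
	for _, v := range values {
		for _, a := range v.args {
			if a.mem {
				if live == nil {
					live = a
				} else if live != a {
					return fmt.Errorf("two live mems @ %s: %s and %s", v.name, live.name, a.name)
				}
			}
		}
		if v.mem {
			live = v
		}
	}
	return nil
}

func main() {
	mem0 := &value{name: "mem0", mem: true}
	st1 := &value{name: "store1", mem: true, args: []*value{mem0}}
	st2 := &value{name: "store2", mem: true, args: []*value{st1}}
	fmt.Println(checkOneLiveMem([]*value{mem0, st1, st2})) // <nil>: a proper chain

	// Two stores both consuming mem0 would leave two memories live:
	bad := &value{name: "store3", mem: true, args: []*value{mem0}}
	fmt.Println(checkOneLiveMem([]*value{mem0, st1, bad})) // error
}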
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
index 4e07c89b88..9ab18d89e9 100644
--- a/src/cmd/compile/internal/ssa/cse.go
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -313,9 +313,13 @@ func cmpVal(v, w *Value, auxIDs auxmap) Cmp {
		// that generate memory.
		return lt2Cmp(v.ID < w.ID)
	}
-
-	if tc := v.Type.Compare(w.Type); tc != CMPeq {
-		return tc
+	// OpSelect is a pseudo-op. We need to be more aggressive
+	// regarding CSE to keep multiple OpSelects of the same
+	// argument from existing.
+	if v.Op != OpSelect0 && v.Op != OpSelect1 {
+		if tc := v.Type.Compare(w.Type); tc != CMPeq {
+			return tc
+		}
	}
	if v.Aux != w.Aux {
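
The reasoning behind skipping the type comparison: Select0 and Select1 merely project a field out of their argument's tuple type, so their own type is fully determined by that argument, and comparing types can spuriously keep two equivalent selects in separate partitions. A stand-in sketch (not the real cmpVal) of how dropping the type from the key coarsens the partition:

package main

import "fmt"

// value is a stand-in with string/ID fields instead of real ssa.Value fields.
type value struct {
	op   string
	typ  string
	arg0 int // ID of the (single) argument
}

// key builds the equivalence-class key used when comparing values.
// For Select0/Select1 the type is omitted: two selects of the same
// tuple must land in the same class even if their Type values
// compare unequal.
func key(v value) string {
	if v.op == "Select0" || v.op == "Select1" {
		return fmt.Sprintf("%s(v%d)", v.op, v.arg0)
	}
	return fmt.Sprintf("%s:%s(v%d)", v.op, v.typ, v.arg0)
}

func main() {
	a := value{op: "Select1", typ: "mem", arg0: 7}
	b := value{op: "Select1", typ: "mem'", arg0: 7} // equivalent but distinct type
	fmt.Println(key(a) == key(b))                   // true: CSE merges them
}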
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index ee6ed51d73..80fa92d94b 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -35,8 +35,20 @@ type DummyFrontend struct {
func (DummyFrontend) StringData(s string) interface{} {
	return nil
}
-func (DummyFrontend) Auto(t Type) GCNode {
-	return nil
+
+type dummyGCNode struct {
+	typ  Type
+	name string
+}
+
+func (d *dummyGCNode) Typ() Type {
+	return d.typ
+}
+func (d *dummyGCNode) String() string {
+	return d.name
+}
+func (d DummyFrontend) Auto(t Type) GCNode {
+	return &dummyGCNode{typ: t, name: "dummy"}
}
func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
	return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeInt(), s.Off + 8}
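
Since Auto now returns a concrete *dummyGCNode rather than nil, a compile-time assertion can make the interface requirement explicit. This is a common Go idiom suggested here for export_test.go; it is not part of the patch:

var _ GCNode = (*dummyGCNode)(nil) // fails to compile if *dummyGCNode stops satisfying GCNode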
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index 7ecea02dae..caea0506fa 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -537,8 +537,8 @@
(CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c]))
(CMPW x (MOVDconst [c])) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst x [c]))
-(CMPU x (MOVDconst [c])) && is32Bit(c) -> (CMPUconst x [int64(uint32(c))])
-(CMPU (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPUconst x [int64(uint32(c))]))
+(CMPU x (MOVDconst [c])) && isU32Bit(c) -> (CMPUconst x [int64(uint32(c))])
+(CMPU (MOVDconst [c]) x) && isU32Bit(c) -> (InvertFlags (CMPUconst x [int64(uint32(c))]))
(CMPWU x (MOVDconst [c])) -> (CMPWUconst x [int64(uint32(c))])
(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(uint32(c))]))
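
The guard change matters because CMPUconst's immediate is an unsigned 32-bit value. The predicates below match the definitions in rewrite.go (reproduced here as a standalone demo); the two constants shown are exactly the cases the old guard got wrong:

package main

import "fmt"

// These predicates match the ones in rewrite.go.
func is32Bit(n int64) bool  { return n == int64(int32(n)) }
func isU32Bit(n int64) bool { return n == int64(uint32(n)) }

func main() {
	// 2^31 fits in 32 unsigned bits but not 32 signed bits, so the
	// old guard wrongly rejected a representable immediate.
	fmt.Println(is32Bit(1<<31), isU32Bit(1<<31)) // false true
	// Worse, a negative constant such as -1 passed is32Bit and was
	// then truncated by uint32(c), turning an unsigned comparison
	// against 2^64-1 into one against 2^32-1.
	fmt.Println(is32Bit(-1), isU32Bit(-1)) // true false
}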
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 7bf778609e..90b5947f1c 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -1699,6 +1699,24 @@ sinking:
		}
		p := d.Preds[0].b // block in loop exiting to d.
+		// Check that the spill value is still live at the start of d.
+		// If it isn't, we can't move the spill here because some other value
+		// may be using the same stack slot. See issue 20472.
+		// The spill value can't be defined in d, so that makes our lives easier.
+		for _, x := range stacklive[d.ID] {
+			if x == vsp.ID {
+				goto stillLive
+			}
+		}
+		for _, v := range d.Values {
+			if v.Op == OpLoadReg && v.Args[0] == vsp {
+				goto stillLive
+			}
+		}
+		// Spill is not live - abort sinking this spill.
+		continue sinking
+	stillLive:
+
		endregs := s.endRegs[p.ID]
		for _, regrec := range endregs {
			if regrec.v == e && regrec.r != noRegister && regrec.c == e { // TODO: regrec.c != e implies different spill possible.
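
The shape of the fix is two membership tests with an early-out: sink the spill into d only if the spill value is in d's live-in stack set or d reloads it. A stand-in sketch (simplified IDs and slices in place of stacklive and d.Values, returns in place of the goto):

package main

import "fmt"

type ID int32

// spillLiveAt reports whether a spill is safe to sink into a block:
// it must be in the block's live-in stack set, or be reloaded there.
// Otherwise its stack slot may already be reused by another value.
func spillLiveAt(stackliveIn []ID, reloads []ID, spill ID) bool {
	for _, x := range stackliveIn {
		if x == spill {
			return true
		}
	}
	for _, r := range reloads {
		if r == spill {
			return true
		}
	}
	return false
}

func main() {
	// Exit block where the spill's slot is still live: sinking is OK.
	fmt.Println(spillLiveAt([]ID{3, 7}, nil, 7)) // true
	// Exit block where the spill is dead (issue 20472): abort sinking.
	fmt.Println(spillLiveAt([]ID{3}, nil, 7)) // false
}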
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index cf8f452d12..d3d1891bcd 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -31,3 +31,68 @@ func TestLiveControlOps(t *testing.T) {
	regalloc(f.f)
	checkFunc(f.f)
}
+
+func TestSpillMove(t *testing.T) {
+	// Test for issue 20472. We shouldn't move a spill out to exit blocks
+	// if there is an exit block where the spill is dead but the pre-spill
+	// value is live.
+	c := testConfig(t)
+	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+	arg1Aux := c.fe.Auto(TypeInt64)
+	arg2Aux := c.fe.Auto(ptrType)
+	f := Fun(c, "entry",
+		Bloc("entry",
+			Valu("mem", OpInitMem, TypeMem, 0, nil),
+			Valu("x", OpArg, TypeInt64, 0, arg1Aux),
+			Valu("p", OpArg, ptrType, 0, arg2Aux),
+			Valu("a", OpAMD64TESTQ, TypeFlags, 0, nil, "x", "x"),
+			Goto("loop1"),
+		),
+		Bloc("loop1",
+			Valu("y", OpAMD64MULQ, TypeInt64, 0, nil, "x", "x"),
+			Eq("a", "loop2", "exit1"),
+		),
+		Bloc("loop2",
+			Eq("a", "loop1", "exit2"),
+		),
+		Bloc("exit1",
+			// store before call, y is available in a register
+			Valu("mem2", OpAMD64MOVQstore, TypeMem, 0, nil, "p", "y", "mem"),
+			Valu("mem3", OpAMD64CALLstatic, TypeMem, 0, nil, "mem2"),
+			Exit("mem3"),
+		),
+		Bloc("exit2",
+			// store after call, y must be loaded from a spill location
+			Valu("mem4", OpAMD64CALLstatic, TypeMem, 0, nil, "mem"),
+			Valu("mem5", OpAMD64MOVQstore, TypeMem, 0, nil, "p", "y", "mem4"),
+			Exit("mem5"),
+		),
+	)
+	flagalloc(f.f)
+	regalloc(f.f)
+	checkFunc(f.f)
+	// There should still be a spill in loop1, and nowhere else.
+	if numSpills(f.blocks["loop1"]) != 1 {
+		t.Errorf("spill missing from loop1")
+	}
+	if numSpills(f.blocks["loop2"]) != 0 {
+		t.Errorf("spill present in loop2")
+	}
+	if numSpills(f.blocks["exit1"]) != 0 {
+		t.Errorf("spill present in exit1")
+	}
+	if numSpills(f.blocks["exit2"]) != 0 {
+		t.Errorf("spill present in exit2")
+	}
+}
+
+func numSpills(b *Block) int {
+	n := 0
+	for _, v := range b.Values {
+		if v.Op == OpStoreReg {
+			n++
+		}
+	}
+	return n
+}
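
From a Go source checkout the new test can be run on its own with the standard test runner:

go test cmd/compile/internal/ssa -run TestSpillMove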
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index 866cf50041..af8fd1d456 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -6784,7 +6784,7 @@ func rewriteValueS390X_OpS390XCMPU(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPU x (MOVDconst [c]))
-	// cond: is32Bit(c)
+	// cond: isU32Bit(c)
	// result: (CMPUconst x [int64(uint32(c))])
	for {
		x := v.Args[0]
@@ -6793,7 +6793,7 @@ func rewriteValueS390X_OpS390XCMPU(v *Value, config *Config) bool {
			break
		}
		c := v_1.AuxInt
-		if !(is32Bit(c)) {
+		if !(isU32Bit(c)) {
			break
		}
		v.reset(OpS390XCMPUconst)
@@ -6802,7 +6802,7 @@ func rewriteValueS390X_OpS390XCMPU(v *Value, config *Config) bool {
		return true
	}
	// match: (CMPU (MOVDconst [c]) x)
-	// cond: is32Bit(c)
+	// cond: isU32Bit(c)
	// result: (InvertFlags (CMPUconst x [int64(uint32(c))]))
	for {
		v_0 := v.Args[0]
@@ -6811,7 +6811,7 @@ func rewriteValueS390X_OpS390XCMPU(v *Value, config *Config) bool {
		}
		c := v_0.AuxInt
		x := v.Args[1]
-		if !(is32Bit(c)) {
+		if !(isU32Bit(c)) {
			break
		}
		v.reset(OpS390XInvertFlags)
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index a455a9a399..78b61f0959 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -148,19 +148,20 @@ func schedule(f *Func) {
		}
	}
+	// TODO: make this logic permanent in types.IsMemory?
+	isMem := func(v *Value) bool {
+		return v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory()
+	}
+
	for _, b := range f.Blocks {
		// Find store chain for block.
		// Store chains for different blocks overwrite each other, so
		// the calculated store chain is good only for this block.
		for _, v := range b.Values {
-			if v.Op != OpPhi && v.Type.IsMemory() {
-				mem := v
-				if v.Op == OpSelect1 {
-					v = v.Args[0]
-				}
+			if v.Op != OpPhi && isMem(v) {
				for _, w := range v.Args {
-					if w.Type.IsMemory() {
-						nextMem[w.ID] = mem
+					if isMem(w) {
+						nextMem[w.ID] = v
					}
				}
			}
		}
@@ -179,15 +180,15 @@ func schedule(f *Func) {
					uses[w.ID]++
				}
				// Any load must come before the following store.
-				if v.Type.IsMemory() || !w.Type.IsMemory() {
-					continue // not a load
-				}
-				s := nextMem[w.ID]
-				if s == nil || s.Block != b {
-					continue
+				if !isMem(v) && isMem(w) {
+					// v is a load.
+					s := nextMem[w.ID]
+					if s == nil || s.Block != b {
+						continue
+					}
+					additionalArgs[s.ID] = append(additionalArgs[s.ID], v)
+					uses[v.ID]++
				}
-				additionalArgs[s.ID] = append(additionalArgs[s.ID], v)
-				uses[v.ID]++
			}
		}
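
With tuple-producing ops participating directly in store chains, "a memory value" now means either a plain memory or a tuple whose second field is memory, and the old Select1 peeling goes away. A stand-in sketch (simplified types, not the real ssa package) of the nextMem construction over such a chain:

package main

import "fmt"

type typ struct {
	mem      bool // plain memory type
	tupleMem bool // tuple whose second field is memory
}

type value struct {
	id   int
	typ  typ
	args []*value
}

// isMem mirrors the helper above: a value counts as memory if its
// type is memory, or it is a tuple carrying memory in field 1.
func isMem(v *value) bool { return v.typ.mem || v.typ.tupleMem }

func main() {
	mem0 := &value{id: 0, typ: typ{mem: true}}
	// A tuple-producing op (e.g. an atomic load returning (val, mem))
	// joins the store chain directly, with no Select1 peeling.
	atomic := &value{id: 1, typ: typ{tupleMem: true}, args: []*value{mem0}}
	store := &value{id: 2, typ: typ{mem: true}, args: []*value{atomic}}

	// Build the per-block store chain: nextMem[w] is the memory value
	// that consumes w.
	nextMem := map[int]*value{}
	for _, v := range []*value{mem0, atomic, store} {
		if isMem(v) {
			for _, w := range v.args {
				if isMem(w) {
					nextMem[w.id] = v
				}
			}
		}
	}
	fmt.Println(nextMem[0].id, nextMem[1].id) // 1 2: mem0 -> atomic -> store
}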