author     Josh Bleecher Snyder <josharian@gmail.com>    2020-04-23 12:00:14 -0700
committer  Josh Bleecher Snyder <josharian@gmail.com>    2020-04-23 19:39:25 +0000
commit     e0915dea09744c1704086048ed51bb8e9ddbcb6f (patch)
tree       7e1a27b698d5177b6ebf08a6baf85468984686eb /src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
parent     d9d88dd27f31936e11dfa621937f09db9a23af36 (diff)
download   go-e0915dea09744c1704086048ed51bb8e9ddbcb6f.tar.gz
           go-e0915dea09744c1704086048ed51bb8e9ddbcb6f.zip
cmd/compile: splitload (CMPconst [0] x) into (TEST x x) on amd64
Provides a minor but widespread benefit to generated code.
Removes one source of random fluctuation when changing
other aspects of the compiler.
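
As a minimal sketch of where this fires (hypothetical code, not part
of this CL), consider a function like the one below. When the
splitload pass has to split its compare-with-load apart, a compare
against zero now becomes a plain load followed by a TEST of the loaded
value against itself:

	// isZero is a hypothetical example. Splitting its compare-with-load
	// previously produced
	//	x = MOVBload {sym} [off] ptr mem
	//	CMPBconst [0] x
	// and now produces
	//	x = MOVBload {sym} [off] ptr mem
	//	TESTB x x
	// TESTB x, x sets the same flags as a compare against zero and
	// typically has a shorter encoding than a compare with an immediate.
	func isZero(p *byte) bool {
		return *p == 0
	}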
Change-Id: I16db6f5e240a97d27f05dc1ba5b8b729af3adb12
Reviewed-on: https://go-review.googlesource.com/c/go/+/229702
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Diffstat (limited to 'src/cmd/compile/internal/ssa/rewriteAMD64splitload.go')
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64splitload.go  271
1 file changed, 271 insertions(+), 0 deletions(-)
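
rewriteAMD64splitload.go is machine-generated from the rule file
src/cmd/compile/internal/ssa/gen/AMD64splitload.rules. The TEST cases
added below correspond to rule pairs of roughly this shape, sketched
here from the match/cond/result comments in the generated code rather
than quoted from the rules file itself:

	(CMPBconstload {sym} [vo] ptr mem) && vo.Val() == 0 => (TESTB x:(MOVBload {sym} [vo.Off32()] ptr mem) x)
	(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])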
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
index b04dcaf7fb..65bfec0f68 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
@@ -58,12 +58,35 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPBconstload {sym} [vo] ptr mem)
+	// cond: vo.Val() == 0
+	// result: (TESTB x:(MOVBload {sym} [vo.Off32()] ptr mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		mem := v_1
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTB)
+		x := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg2(ptr, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPBconstload {sym} [vo] ptr mem)
+	// cond: vo.Val() != 0
 	// result: (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
 		sym := auxToSym(v.Aux)
 		ptr := v_0
 		mem := v_1
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPBconst)
 		v.AuxInt = int8ToAuxInt(vo.Val8())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
@@ -73,6 +96,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
 	v_2 := v.Args[2]
@@ -81,6 +105,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() == 0
+	// result: (TESTB x:(MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		idx := v_1
+		mem := v_2
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTB)
+		x := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg3(ptr, idx, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() != 0
 	// result: (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
@@ -88,6 +133,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
 		ptr := v_0
 		idx := v_1
 		mem := v_2
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPBconst)
 		v.AuxInt = int8ToAuxInt(vo.Val8())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
@@ -97,6 +145,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool {
 	v_2 := v.Args[2]
@@ -152,12 +201,35 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPLconstload {sym} [vo] ptr mem)
+	// cond: vo.Val() == 0
+	// result: (TESTL x:(MOVLload {sym} [vo.Off32()] ptr mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		mem := v_1
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTL)
+		x := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg2(ptr, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPLconstload {sym} [vo] ptr mem)
+	// cond: vo.Val() != 0
 	// result: (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
 		sym := auxToSym(v.Aux)
 		ptr := v_0
 		mem := v_1
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPLconst)
 		v.AuxInt = int32ToAuxInt(vo.Val32())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
@@ -167,6 +239,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
 	v_2 := v.Args[2]
@@ -175,6 +248,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() == 0
+	// result: (TESTL x:(MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		idx := v_1
+		mem := v_2
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTL)
+		x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg3(ptr, idx, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() != 0
 	// result: (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
@@ -182,6 +276,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
 		ptr := v_0
 		idx := v_1
 		mem := v_2
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPLconst)
 		v.AuxInt = int32ToAuxInt(vo.Val32())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
@@ -191,6 +288,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
 	v_2 := v.Args[2]
@@ -199,6 +297,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() == 0
+	// result: (TESTL x:(MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		idx := v_1
+		mem := v_2
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTL)
+		x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg3(ptr, idx, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() != 0
 	// result: (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
@@ -206,6 +325,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
 		ptr := v_0
 		idx := v_1
 		mem := v_2
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPLconst)
 		v.AuxInt = int32ToAuxInt(vo.Val32())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
@@ -215,6 +337,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool {
 	v_2 := v.Args[2]
@@ -295,12 +418,35 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPQconstload {sym} [vo] ptr mem)
+	// cond: vo.Val() == 0
+	// result: (TESTQ x:(MOVQload {sym} [vo.Off32()] ptr mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		mem := v_1
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTQ)
+		x := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg2(ptr, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPQconstload {sym} [vo] ptr mem)
+	// cond: vo.Val() != 0
 	// result: (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
 		sym := auxToSym(v.Aux)
 		ptr := v_0
 		mem := v_1
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPQconst)
 		v.AuxInt = int32ToAuxInt(vo.Val32())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
@@ -310,6 +456,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
 	v_2 := v.Args[2]
@@ -318,6 +465,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() == 0
+	// result: (TESTQ x:(MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		idx := v_1
+		mem := v_2
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTQ)
+		x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg3(ptr, idx, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() != 0
 	// result: (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
@@ -325,6 +493,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
 		ptr := v_0
 		idx := v_1
 		mem := v_2
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPQconst)
 		v.AuxInt = int32ToAuxInt(vo.Val32())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
@@ -334,6 +505,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
 	v_2 := v.Args[2]
@@ -342,6 +514,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() == 0
+	// result: (TESTQ x:(MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		idx := v_1
+		mem := v_2
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTQ)
+		x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg3(ptr, idx, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() != 0
 	// result: (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
@@ -349,6 +542,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
 		ptr := v_0
 		idx := v_1
 		mem := v_2
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPQconst)
 		v.AuxInt = int32ToAuxInt(vo.Val32())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
@@ -358,6 +554,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool {
 	v_2 := v.Args[2]
@@ -438,12 +635,35 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPWconstload {sym} [vo] ptr mem)
+	// cond: vo.Val() == 0
+	// result: (TESTW x:(MOVWload {sym} [vo.Off32()] ptr mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		mem := v_1
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTW)
+		x := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg2(ptr, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPWconstload {sym} [vo] ptr mem)
+	// cond: vo.Val() != 0
 	// result: (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
 		sym := auxToSym(v.Aux)
 		ptr := v_0
 		mem := v_1
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPWconst)
 		v.AuxInt = int16ToAuxInt(vo.Val16())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
@@ -453,6 +673,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
 	v_2 := v.Args[2]
@@ -461,6 +682,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() == 0
+	// result: (TESTW x:(MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		idx := v_1
+		mem := v_2
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTW)
+		x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg3(ptr, idx, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() != 0
 	// result: (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
@@ -468,6 +710,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
 		ptr := v_0
 		idx := v_1
 		mem := v_2
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPWconst)
 		v.AuxInt = int16ToAuxInt(vo.Val16())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
@@ -477,6 +722,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
 	v_2 := v.Args[2]
@@ -485,6 +731,27 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() == 0
+	// result: (TESTW x:(MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) x)
+	for {
+		vo := auxIntToValAndOff(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		idx := v_1
+		mem := v_2
+		if !(vo.Val() == 0) {
+			break
+		}
+		v.reset(OpAMD64TESTW)
+		x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+		x.AuxInt = int32ToAuxInt(vo.Off32())
+		x.Aux = symToAux(sym)
+		x.AddArg3(ptr, idx, mem)
+		v.AddArg2(x, x)
+		return true
+	}
+	// match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
+	// cond: vo.Val() != 0
 	// result: (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
 	for {
 		vo := auxIntToValAndOff(v.AuxInt)
@@ -492,6 +759,9 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
 		ptr := v_0
 		idx := v_1
 		mem := v_2
+		if !(vo.Val() != 0) {
+			break
+		}
 		v.reset(OpAMD64CMPWconst)
 		v.AuxInt = int16ToAuxInt(vo.Val16())
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
@@ -501,6 +771,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	return false
 }
 func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool {
 	v_2 := v.Args[2]