author    Josh Bleecher Snyder <josharian@gmail.com>  2019-03-10 08:34:59 -0700
committer Josh Bleecher Snyder <josharian@gmail.com>  2019-03-19 16:55:40 +0000
commit    80b6812d7b33cbc16232a3b1b631aaa26be17a71 (patch)
tree      619231ea7d286e5e8afe474e059e052a21a298f7 /src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
parent    3cb1e9d98a98abed5fbdcf78a54956851310fe30 (diff)
cmd/compile: move flagalloc op splitting to rewrite rules
Flagalloc has the unenviable task of splitting flag-generating ops that have been merged with loads when the flags need to be "spilled" (i.e. regenerated). Since there weren't very many of them, there was a hard-coded list of ops and bespoke code written to split them.

This change migrates load splitting into rewrite rules, to make them easier to maintain.

Change-Id: I7750eafb888a802206c410f9c341b3133e7748b8
Reviewed-on: https://go-review.googlesource.com/c/go/+/166978
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
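For reference, the rewrite rules driving this generated file (gen/AMD64splitload.rules) take roughly the form sketched below. This is reconstructed from the match/result comments in the generated code that follows, not copied from the rules file itself; the B, L, and W variants follow the same pattern as the Q variants shown.

    (CMPQload {sym} [off] ptr x mem) -> (CMPQ (MOVQload {sym} [off] ptr mem) x)
    (CMPQconstload {sym} [vo] ptr mem) -> (CMPQconst (MOVQload {sym} [offOnly(vo)] ptr mem) [valOnly(vo)])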
Diffstat (limited to 'src/cmd/compile/internal/ssa/rewriteAMD64splitload.go')
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64splitload.go  253
1 files changed, 253 insertions, 0 deletions
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
new file mode 100644
index 0000000000..af7067b754
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
@@ -0,0 +1,253 @@
+// Code generated from gen/AMD64splitload.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "fmt"
+import "math"
+import "cmd/internal/obj"
+import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
+
+var _ = fmt.Println // in case not otherwise used
+var _ = math.MinInt8 // in case not otherwise used
+var _ = obj.ANOP // in case not otherwise used
+var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
+
+func rewriteValueAMD64splitload(v *Value) bool {
+ switch v.Op {
+ case OpAMD64CMPBconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPBconstload_0(v)
+ case OpAMD64CMPBload:
+ return rewriteValueAMD64splitload_OpAMD64CMPBload_0(v)
+ case OpAMD64CMPLconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstload_0(v)
+ case OpAMD64CMPLload:
+ return rewriteValueAMD64splitload_OpAMD64CMPLload_0(v)
+ case OpAMD64CMPQconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstload_0(v)
+ case OpAMD64CMPQload:
+ return rewriteValueAMD64splitload_OpAMD64CMPQload_0(v)
+ case OpAMD64CMPWconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstload_0(v)
+ case OpAMD64CMPWload:
+ return rewriteValueAMD64splitload_OpAMD64CMPWload_0(v)
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBconstload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // cond:
+ // result: (CMPBconst (MOVBload {sym} [offOnly(vo)] ptr mem) [valOnly(vo)])
+ for {
+ vo := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = valOnly(vo)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = offOnly(vo)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CMPBload {sym} [off] ptr x mem)
+ // cond:
+ // result: (CMPB (MOVBload {sym} [off] ptr mem) x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64CMPB)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // cond:
+ // result: (CMPLconst (MOVLload {sym} [offOnly(vo)] ptr mem) [valOnly(vo)])
+ for {
+ vo := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = valOnly(vo)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = offOnly(vo)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CMPLload {sym} [off] ptr x mem)
+ // cond:
+ // result: (CMPL (MOVLload {sym} [off] ptr mem) x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CMPQconstload {sym} [vo] ptr mem)
+ // cond:
+ // result: (CMPQconst (MOVQload {sym} [offOnly(vo)] ptr mem) [valOnly(vo)])
+ for {
+ vo := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = valOnly(vo)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = offOnly(vo)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CMPQload {sym} [off] ptr x mem)
+ // cond:
+ // result: (CMPQ (MOVQload {sym} [off] ptr mem) x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // cond:
+ // result: (CMPWconst (MOVWload {sym} [offOnly(vo)] ptr mem) [valOnly(vo)])
+ for {
+ vo := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = valOnly(vo)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = offOnly(vo)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWload_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (CMPWload {sym} [off] ptr x mem)
+ // cond:
+ // result: (CMPW (MOVWload {sym} [off] ptr mem) x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteBlockAMD64splitload(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := b.Func.fe
+ _ = fe
+ typ := &config.Types
+ _ = typ
+ switch b.Kind {
+ }
+ return false
+}
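The *constload rewrites above rely on offOnly and valOnly, which are not defined in this file; they decode the single AuxInt that packs both the comparison constant and the load offset (the ssa package's ValAndOff encoding). Below is a minimal standalone sketch of that decoding, assuming the value lives in the high 32 bits and the offset in the low 32 bits; the stand-in type and helper names here are illustrative, not the compiler's own definitions.

package main

import "fmt"

// valAndOff is a hypothetical stand-in for the ssa package's ValAndOff:
// a 64-bit AuxInt holding a 32-bit value and a 32-bit offset.
type valAndOff int64

// makeValAndOff packs val into the high 32 bits and off into the low 32 bits.
func makeValAndOff(val, off int32) valAndOff {
	return valAndOff(int64(val)<<32 | int64(uint32(off)))
}

// val plays the role of valOnly(vo) in the rewrites above.
func (x valAndOff) val() int64 { return int64(x) >> 32 }

// off plays the role of offOnly(vo) in the rewrites above.
func (x valAndOff) off() int64 { return int64(int32(x)) }

func main() {
	// e.g. a (CMPQconstload {sym} [vo] ptr mem) comparing against 7 at offset 16
	vo := makeValAndOff(7, 16)
	fmt.Println(vo.val(), vo.off()) // prints: 7 16
}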