 src/cmd/compile/internal/ssa/gen/rulegen.go    |  402
 src/cmd/compile/internal/ssa/rewrite386.go     | 7434
 src/cmd/compile/internal/ssa/rewriteAMD64.go   | 9380
 src/cmd/compile/internal/ssa/rewriteARM.go     | 9044
 src/cmd/compile/internal/ssa/rewriteARM64.go   | 1648
 src/cmd/compile/internal/ssa/rewritePPC64.go   |  758
 src/cmd/compile/internal/ssa/rewritedec64.go   |   18
 src/cmd/compile/internal/ssa/rewritegeneric.go |   70
 8 files changed, 14374 insertions(+), 14380 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 0947e65ca7..0cb428b6fa 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -117,15 +117,17 @@ func genRules(arch arch) {
if unbalanced(rule) {
continue
}
- op := strings.Split(rule, " ")[0][1:]
- if op[len(op)-1] == ')' {
- op = op[:len(op)-1] // rule has only opcode, e.g. (ConstNil) -> ...
- }
+
loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno)
- if isBlock(op, arch) {
- blockrules[op] = append(blockrules[op], Rule{rule: rule, loc: loc})
+ r := Rule{rule: rule, loc: loc}
+ if rawop := strings.Split(rule, " ")[0][1:]; isBlock(rawop, arch) {
+ blockrules[rawop] = append(blockrules[rawop], r)
} else {
- oprules[op] = append(oprules[op], Rule{rule: rule, loc: loc})
+ // Do fancier value op matching.
+ match, _, _ := r.parse()
+ op, oparch, _, _, _, _ := parseValue(match, arch, loc)
+ opname := fmt.Sprintf("Op%s%s", oparch, op.name)
+ oprules[opname] = append(oprules[opname], r)
}
rule = ""
ruleLineno = 0
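
With this change, value rules are keyed by the fully resolved Op enum name instead of the raw rule text, so generic and arch-specific ops share one namespace. A minimal sketch of the key construction, using the names from the hunk above:

	// For a lowering rule such as (Add32 x y) -> (ADDL x y) on 386,
	// the match-side op Add32 is generic, so oparch is "" and the key
	// is "OpAdd32"; for a rule matching (CMPB ...) it is "Op386CMPB".
	opname := fmt.Sprintf("Op%s%s", oparch, op.name)
	oprules[opname] = append(oprules[opname], r)

Because ops are emitted in sorted key order, the generated switch statements now sort by full enum name (all Op386* cases before OpAdd16 and friends); that is what moves the big case blocks in rewrite386.go below.
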
@@ -157,8 +159,8 @@ func genRules(arch arch) {
fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name)
fmt.Fprintf(w, "switch v.Op {\n")
for _, op := range ops {
- fmt.Fprintf(w, "case %s:\n", opName(op, arch))
- fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, opName(op, arch))
+ fmt.Fprintf(w, "case %s:\n", op)
+ fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, op)
}
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return false\n")
@@ -167,7 +169,7 @@ func genRules(arch arch) {
// Generate a routine per op. Note that we don't make one giant routine
// because it is too big for some compilers.
for _, op := range ops {
- fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, opName(op, arch))
+ fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, op)
fmt.Fprintln(w, "b := v.Block")
fmt.Fprintln(w, "_ = b")
var canFail bool
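
For reference, each emitted per-op routine has the same shape, roughly (cf. the generated rewriteValue386_Op386CMPB later in this diff):

	func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
		b := v.Block
		_ = b
		// One for-loop per rule: a failed check breaks out of the
		// loop and the next rule is tried; a successful match
		// rewrites v and returns true.
		for {
			// ... generated match checks and result construction
			return true
		}
		return false
	}
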
@@ -334,141 +336,108 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t
}
canFail := false
- // split body up into regions. Split by spaces/tabs, except those
- // contained in () or {}.
- s := split(match[1 : len(match)-1]) // remove parens, then split
-
- // Find op record
- var op opData
- for _, x := range genericOps {
- if x.name == s[0] {
- op = x
- break
- }
- }
- for _, x := range arch.ops {
- if x.name == s[0] {
- op = x
- break
- }
- }
- if op.name == "" {
- log.Fatalf("%s: unknown op %s", loc, s[0])
- }
+ op, oparch, typ, auxint, aux, args := parseValue(match, arch, loc)
// check op
if !top {
- fmt.Fprintf(w, "if %s.Op != %s {\nbreak\n}\n", v, opName(s[0], arch))
+ fmt.Fprintf(w, "if %s.Op != Op%s%s {\nbreak\n}\n", v, oparch, op.name)
canFail = true
}
- // check type/aux/args
- argnum := 0
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction
- t := a[1 : len(a)-1] // remove <>
- if !isVariable(t) {
- // code. We must match the results of this code.
- fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
+ if typ != "" {
+ if !isVariable(typ) {
+ // code. We must match the results of this code.
+ fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[typ]; ok {
+ // must match previous variable
+ fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
canFail = true
} else {
- // variable
- if _, ok := m[t]; ok {
- // must match previous variable
- fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
- canFail = true
- } else {
- m[t] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Type\n", t, v)
- }
+ m[typ] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Type\n", typ, v)
}
- } else if a[0] == '[' {
- // auxint restriction
- switch op.aux {
- case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove []
- if !isVariable(x) {
- // code
- fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
+ }
+ }
+
+ if auxint != "" {
+ if !isVariable(auxint) {
+ // code
+ fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[auxint]; ok {
+ fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
canFail = true
} else {
- // variable
- if _, ok := m[x]; ok {
- fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
- canFail = true
- } else {
- m[x] = struct{}{}
- fmt.Fprintf(w, "%s := %s.AuxInt\n", x, v)
- }
- }
- } else if a[0] == '{' {
- // aux restriction
- switch op.aux {
- case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+ m[auxint] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.AuxInt\n", auxint, v)
}
- x := a[1 : len(a)-1] // remove {}
- if !isVariable(x) {
- // code
- fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
+ }
+ }
+
+ if aux != "" {
+ if !isVariable(aux) {
+ // code
+ fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[aux]; ok {
+ fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
canFail = true
} else {
- // variable
- if _, ok := m[x]; ok {
- fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
- canFail = true
- } else {
- m[x] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Aux\n", x, v)
- }
+ m[aux] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Aux\n", aux, v)
}
- } else if a == "_" {
- argnum++
- } else if !strings.Contains(a, "(") {
+ }
+ }
+
+ for i, arg := range args {
+ if arg == "_" {
+ continue
+ }
+ if !strings.Contains(arg, "(") {
// leaf variable
- if _, ok := m[a]; ok {
+ if _, ok := m[arg]; ok {
// variable already has a definition. Check whether
// the old definition and the new definition match.
// For example, (add x x). Equality is just pointer equality
// on Values (so cse is important to do before lowering).
- fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", a, v, argnum)
+ fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", arg, v, i)
canFail = true
} else {
// remember that this variable references the given value
- m[a] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Args[%d]\n", a, v, argnum)
+ m[arg] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Args[%d]\n", arg, v, i)
}
- argnum++
+ continue
+ }
+ // compound sexpr
+ var argname string
+ colon := strings.Index(arg, ":")
+ openparen := strings.Index(arg, "(")
+ if colon >= 0 && openparen >= 0 && colon < openparen {
+ // rule-specified name
+ argname = arg[:colon]
+ arg = arg[colon+1:]
} else {
- // compound sexpr
- var argname string
- colon := strings.Index(a, ":")
- openparen := strings.Index(a, "(")
- if colon >= 0 && openparen >= 0 && colon < openparen {
- // rule-specified name
- argname = a[:colon]
- a = a[colon+1:]
- } else {
- // autogenerated name
- argname = fmt.Sprintf("%s_%d", v, argnum)
- }
- fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, argnum)
- if genMatch0(w, arch, a, argname, m, false, loc) {
- canFail = true
- }
- argnum++
+ // autogenerated name
+ argname = fmt.Sprintf("%s_%d", v, i)
+ }
+ fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, i)
+ if genMatch0(w, arch, arg, argname, m, false, loc) {
+ canFail = true
}
}
+
if op.argLength == -1 {
- fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, argnum)
+ fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, len(args))
canFail = true
- } else if int(op.argLength) != argnum {
- log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
}
return canFail
}
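
Net effect on the generated matchers: restrictions are now emitted in a fixed order (type, then auxint, then aux, then args), so AuxInt checks land before the Args are unpacked. For example, the matcher for (CMPBconst (MOVLconst [x]) [y]) now reads v.AuxInt first, as in the regenerated code below:

	// generated shape (see rewriteValue386_Op386CMPBconst in this diff)
	y := v.AuxInt
	v_0 := v.Args[0]
	if v_0.Op != Op386MOVLconst {
		break
	}
	x := v_0.AuxInt
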
@@ -500,105 +469,44 @@ func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move boo
return result
}
- s := split(result[1 : len(result)-1]) // remove parens, then split
-
- // Find op record
- var op opData
- for _, x := range genericOps {
- if x.name == s[0] {
- op = x
- break
- }
- }
- for _, x := range arch.ops {
- if x.name == s[0] {
- op = x
- break
- }
- }
- if op.name == "" {
- log.Fatalf("%s: unknown op %s", loc, s[0])
- }
+ op, oparch, typ, auxint, aux, args := parseValue(result, arch, loc)
// Find the type of the variable.
- var opType string
- var typeOverride bool
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction
- opType = a[1 : len(a)-1] // remove <>
- typeOverride = true
- break
- }
- }
- if opType == "" {
- // find default type, if any
- for _, op := range arch.ops {
- if op.name == s[0] && op.typ != "" {
- opType = typeName(op.typ)
- break
- }
- }
- }
- if opType == "" {
- for _, op := range genericOps {
- if op.name == s[0] && op.typ != "" {
- opType = typeName(op.typ)
- break
- }
- }
+ typeOverride := typ != ""
+ if typ == "" && op.typ != "" {
+ typ = typeName(op.typ)
}
+
var v string
if top && !move {
v = "v"
- fmt.Fprintf(w, "v.reset(%s)\n", opName(s[0], arch))
+ fmt.Fprintf(w, "v.reset(Op%s%s)\n", oparch, op.name)
if typeOverride {
- fmt.Fprintf(w, "v.Type = %s\n", opType)
+ fmt.Fprintf(w, "v.Type = %s\n", typ)
}
} else {
- if opType == "" {
- log.Fatalf("sub-expression %s (op=%s) must have a type", result, s[0])
+ if typ == "" {
+ log.Fatalf("sub-expression %s (op=Op%s%s) must have a type", result, oparch, op.name)
}
v = fmt.Sprintf("v%d", *alloc)
*alloc++
- fmt.Fprintf(w, "%s := b.NewValue0(v.Line, %s, %s)\n", v, opName(s[0], arch), opType)
+ fmt.Fprintf(w, "%s := b.NewValue0(v.Line, Op%s%s, %s)\n", v, oparch, op.name, typ)
if move && top {
// Rewrite original into a copy
fmt.Fprintf(w, "v.reset(OpCopy)\n")
fmt.Fprintf(w, "v.AddArg(%s)\n", v)
}
}
- argnum := 0
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction, handled above
- } else if a[0] == '[' {
- // auxint restriction
- switch op.aux {
- case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove []
- fmt.Fprintf(w, "%s.AuxInt = %s\n", v, x)
- } else if a[0] == '{' {
- // aux restriction
- switch op.aux {
- case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove {}
- fmt.Fprintf(w, "%s.Aux = %s\n", v, x)
- } else {
- // regular argument (sexpr or variable)
- x := genResult0(w, arch, a, alloc, false, move, loc)
- fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
- argnum++
- }
+
+ if auxint != "" {
+ fmt.Fprintf(w, "%s.AuxInt = %s\n", v, auxint)
+ }
+ if aux != "" {
+ fmt.Fprintf(w, "%s.Aux = %s\n", v, aux)
}
- if op.argLength != -1 && int(op.argLength) != argnum {
- log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
+ for _, arg := range args {
+ x := genResult0(w, arch, arg, alloc, false, move, loc)
+ fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
}
return v
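
The result side gets the same fixed ordering: AuxInt and Aux are assigned immediately after v.reset, before any AddArg calls. For a rule producing (CMPBconst x [int64(int8(c))]) the emitted code becomes:

	// generated shape (see rewriteValue386_Op386CMPB in this diff)
	v.reset(Op386CMPBconst)
	v.AuxInt = int64(int8(c))
	v.AddArg(x)

This reordering, not any change to the rules themselves, accounts for the ~14k-line churn in the regenerated rewrite*.go files.
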
@@ -666,16 +574,102 @@ func isBlock(name string, arch arch) bool {
return false
}
-// opName converts from an op name specified in a rule file to an Op enum.
-// if the name matches a generic op, returns "Op" plus the specified name.
-// Otherwise, returns "Op" plus arch name plus op name.
-func opName(name string, arch arch) string {
- for _, op := range genericOps {
- if op.name == name {
- return "Op" + name
+// parseValue parses a parenthesized value from a rule.
+// The value can be from the match or the result side.
+// It returns the op and the unparsed strings for the typ, auxint, and aux restrictions, as well as for all args.
+// oparch is the architecture that op is located in, or "" for generic.
+func parseValue(val string, arch arch, loc string) (op opData, oparch string, typ string, auxint string, aux string, args []string) {
+ val = val[1 : len(val)-1] // remove ()
+
+ // Split val up into regions.
+ // Split by spaces/tabs, except those contained in (), {}, [], or <>.
+ s := split(val)
+
+ // Extract restrictions and args.
+ for _, a := range s[1:] {
+ switch a[0] {
+ case '<':
+ typ = a[1 : len(a)-1] // remove <>
+ case '[':
+ auxint = a[1 : len(a)-1] // remove []
+ case '{':
+ aux = a[1 : len(a)-1] // remove {}
+ default:
+ args = append(args, a)
+ }
+ }
+
+ // Resolve the op.
+
+ // match reports whether x is a good op to select.
+ // If strict is true, rule generation might succeed.
+ // If strict is false, rule generation has failed,
+ // but we're trying to generate a useful error.
+ // Doing strict=true then strict=false allows
+ // precise op matching while retaining good error messages.
+ match := func(x opData, strict bool, archname string) bool {
+ if x.name != s[0] {
+ return false
+ }
+ if x.argLength != -1 && int(x.argLength) != len(args) {
+ if strict {
+ return false
+ } else {
+ log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s[0], archname, x.argLength, len(args))
+ }
+ }
+ return true
+ }
+
+ for _, x := range genericOps {
+ if match(x, true, "generic") {
+ op = x
+ break
}
}
- return "Op" + arch.name + name
+ if arch.name != "generic" {
+ for _, x := range arch.ops {
+ if match(x, true, arch.name) {
+ if op.name != "" {
+ log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name)
+ }
+ op = x
+ oparch = arch.name
+ break
+ }
+ }
+ }
+
+ if op.name == "" {
+ // Failed to find the op.
+ // Run through everything again with strict=false
+ // to generate useful diagnostic messages before failing.
+ for _, x := range genericOps {
+ match(x, false, "generic")
+ }
+ for _, x := range arch.ops {
+ match(x, false, arch.name)
+ }
+ log.Fatalf("%s: unknown op %s", loc, s)
+ }
+
+ // Sanity check aux, auxint.
+ if auxint != "" {
+ switch op.aux {
+ case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
+ default:
+ log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
+ }
+ }
+ if aux != "" {
+ switch op.aux {
+ case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
+ default:
+ log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+ }
+ }
+
+ return
}
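
A worked example of the decomposition (illustrative values, not a test from this change), for a 386 rule value:

	// parseValue("(MOVBstoreconst [sc] {s} p mem)", arch, loc) yields:
	//   op.name = "MOVBstoreconst", oparch = "386"
	//   typ    = ""            (no <...> restriction)
	//   auxint = "sc"          (from [...])
	//   aux    = "s"           (from {...})
	//   args   = []string{"p", "mem"}
	//
	// MOVBstoreconst's aux kind is SymValAndOff (as on AMD64), which
	// appears in both switch lists above, so the auxint and aux
	// sanity checks pass.
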
func blockName(name string, arch arch) string {
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index a6ded59452..d54a9cbc08 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -20,6 +20,184 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_Op386ANDL(v, config)
case Op386ANDLconst:
return rewriteValue386_Op386ANDLconst(v, config)
+ case Op386CMPB:
+ return rewriteValue386_Op386CMPB(v, config)
+ case Op386CMPBconst:
+ return rewriteValue386_Op386CMPBconst(v, config)
+ case Op386CMPL:
+ return rewriteValue386_Op386CMPL(v, config)
+ case Op386CMPLconst:
+ return rewriteValue386_Op386CMPLconst(v, config)
+ case Op386CMPW:
+ return rewriteValue386_Op386CMPW(v, config)
+ case Op386CMPWconst:
+ return rewriteValue386_Op386CMPWconst(v, config)
+ case Op386LEAL:
+ return rewriteValue386_Op386LEAL(v, config)
+ case Op386LEAL1:
+ return rewriteValue386_Op386LEAL1(v, config)
+ case Op386LEAL2:
+ return rewriteValue386_Op386LEAL2(v, config)
+ case Op386LEAL4:
+ return rewriteValue386_Op386LEAL4(v, config)
+ case Op386LEAL8:
+ return rewriteValue386_Op386LEAL8(v, config)
+ case Op386MOVBLSX:
+ return rewriteValue386_Op386MOVBLSX(v, config)
+ case Op386MOVBLSXload:
+ return rewriteValue386_Op386MOVBLSXload(v, config)
+ case Op386MOVBLZX:
+ return rewriteValue386_Op386MOVBLZX(v, config)
+ case Op386MOVBload:
+ return rewriteValue386_Op386MOVBload(v, config)
+ case Op386MOVBloadidx1:
+ return rewriteValue386_Op386MOVBloadidx1(v, config)
+ case Op386MOVBstore:
+ return rewriteValue386_Op386MOVBstore(v, config)
+ case Op386MOVBstoreconst:
+ return rewriteValue386_Op386MOVBstoreconst(v, config)
+ case Op386MOVBstoreconstidx1:
+ return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
+ case Op386MOVBstoreidx1:
+ return rewriteValue386_Op386MOVBstoreidx1(v, config)
+ case Op386MOVLload:
+ return rewriteValue386_Op386MOVLload(v, config)
+ case Op386MOVLloadidx1:
+ return rewriteValue386_Op386MOVLloadidx1(v, config)
+ case Op386MOVLloadidx4:
+ return rewriteValue386_Op386MOVLloadidx4(v, config)
+ case Op386MOVLstore:
+ return rewriteValue386_Op386MOVLstore(v, config)
+ case Op386MOVLstoreconst:
+ return rewriteValue386_Op386MOVLstoreconst(v, config)
+ case Op386MOVLstoreconstidx1:
+ return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
+ case Op386MOVLstoreconstidx4:
+ return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
+ case Op386MOVLstoreidx1:
+ return rewriteValue386_Op386MOVLstoreidx1(v, config)
+ case Op386MOVLstoreidx4:
+ return rewriteValue386_Op386MOVLstoreidx4(v, config)
+ case Op386MOVSDload:
+ return rewriteValue386_Op386MOVSDload(v, config)
+ case Op386MOVSDloadidx1:
+ return rewriteValue386_Op386MOVSDloadidx1(v, config)
+ case Op386MOVSDloadidx8:
+ return rewriteValue386_Op386MOVSDloadidx8(v, config)
+ case Op386MOVSDstore:
+ return rewriteValue386_Op386MOVSDstore(v, config)
+ case Op386MOVSDstoreidx1:
+ return rewriteValue386_Op386MOVSDstoreidx1(v, config)
+ case Op386MOVSDstoreidx8:
+ return rewriteValue386_Op386MOVSDstoreidx8(v, config)
+ case Op386MOVSSload:
+ return rewriteValue386_Op386MOVSSload(v, config)
+ case Op386MOVSSloadidx1:
+ return rewriteValue386_Op386MOVSSloadidx1(v, config)
+ case Op386MOVSSloadidx4:
+ return rewriteValue386_Op386MOVSSloadidx4(v, config)
+ case Op386MOVSSstore:
+ return rewriteValue386_Op386MOVSSstore(v, config)
+ case Op386MOVSSstoreidx1:
+ return rewriteValue386_Op386MOVSSstoreidx1(v, config)
+ case Op386MOVSSstoreidx4:
+ return rewriteValue386_Op386MOVSSstoreidx4(v, config)
+ case Op386MOVWLSX:
+ return rewriteValue386_Op386MOVWLSX(v, config)
+ case Op386MOVWLSXload:
+ return rewriteValue386_Op386MOVWLSXload(v, config)
+ case Op386MOVWLZX:
+ return rewriteValue386_Op386MOVWLZX(v, config)
+ case Op386MOVWload:
+ return rewriteValue386_Op386MOVWload(v, config)
+ case Op386MOVWloadidx1:
+ return rewriteValue386_Op386MOVWloadidx1(v, config)
+ case Op386MOVWloadidx2:
+ return rewriteValue386_Op386MOVWloadidx2(v, config)
+ case Op386MOVWstore:
+ return rewriteValue386_Op386MOVWstore(v, config)
+ case Op386MOVWstoreconst:
+ return rewriteValue386_Op386MOVWstoreconst(v, config)
+ case Op386MOVWstoreconstidx1:
+ return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
+ case Op386MOVWstoreconstidx2:
+ return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
+ case Op386MOVWstoreidx1:
+ return rewriteValue386_Op386MOVWstoreidx1(v, config)
+ case Op386MOVWstoreidx2:
+ return rewriteValue386_Op386MOVWstoreidx2(v, config)
+ case Op386MULL:
+ return rewriteValue386_Op386MULL(v, config)
+ case Op386MULLconst:
+ return rewriteValue386_Op386MULLconst(v, config)
+ case Op386NEGL:
+ return rewriteValue386_Op386NEGL(v, config)
+ case Op386NOTL:
+ return rewriteValue386_Op386NOTL(v, config)
+ case Op386ORL:
+ return rewriteValue386_Op386ORL(v, config)
+ case Op386ORLconst:
+ return rewriteValue386_Op386ORLconst(v, config)
+ case Op386ROLBconst:
+ return rewriteValue386_Op386ROLBconst(v, config)
+ case Op386ROLLconst:
+ return rewriteValue386_Op386ROLLconst(v, config)
+ case Op386ROLWconst:
+ return rewriteValue386_Op386ROLWconst(v, config)
+ case Op386SARB:
+ return rewriteValue386_Op386SARB(v, config)
+ case Op386SARBconst:
+ return rewriteValue386_Op386SARBconst(v, config)
+ case Op386SARL:
+ return rewriteValue386_Op386SARL(v, config)
+ case Op386SARLconst:
+ return rewriteValue386_Op386SARLconst(v, config)
+ case Op386SARW:
+ return rewriteValue386_Op386SARW(v, config)
+ case Op386SARWconst:
+ return rewriteValue386_Op386SARWconst(v, config)
+ case Op386SBBL:
+ return rewriteValue386_Op386SBBL(v, config)
+ case Op386SBBLcarrymask:
+ return rewriteValue386_Op386SBBLcarrymask(v, config)
+ case Op386SETA:
+ return rewriteValue386_Op386SETA(v, config)
+ case Op386SETAE:
+ return rewriteValue386_Op386SETAE(v, config)
+ case Op386SETB:
+ return rewriteValue386_Op386SETB(v, config)
+ case Op386SETBE:
+ return rewriteValue386_Op386SETBE(v, config)
+ case Op386SETEQ:
+ return rewriteValue386_Op386SETEQ(v, config)
+ case Op386SETG:
+ return rewriteValue386_Op386SETG(v, config)
+ case Op386SETGE:
+ return rewriteValue386_Op386SETGE(v, config)
+ case Op386SETL:
+ return rewriteValue386_Op386SETL(v, config)
+ case Op386SETLE:
+ return rewriteValue386_Op386SETLE(v, config)
+ case Op386SETNE:
+ return rewriteValue386_Op386SETNE(v, config)
+ case Op386SHLL:
+ return rewriteValue386_Op386SHLL(v, config)
+ case Op386SHRB:
+ return rewriteValue386_Op386SHRB(v, config)
+ case Op386SHRL:
+ return rewriteValue386_Op386SHRL(v, config)
+ case Op386SHRW:
+ return rewriteValue386_Op386SHRW(v, config)
+ case Op386SUBL:
+ return rewriteValue386_Op386SUBL(v, config)
+ case Op386SUBLcarry:
+ return rewriteValue386_Op386SUBLcarry(v, config)
+ case Op386SUBLconst:
+ return rewriteValue386_Op386SUBLconst(v, config)
+ case Op386XORL:
+ return rewriteValue386_Op386XORL(v, config)
+ case Op386XORLconst:
+ return rewriteValue386_Op386XORLconst(v, config)
case OpAdd16:
return rewriteValue386_OpAdd16(v, config)
case OpAdd32:
@@ -48,18 +226,6 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_OpAndB(v, config)
case OpBswap32:
return rewriteValue386_OpBswap32(v, config)
- case Op386CMPB:
- return rewriteValue386_Op386CMPB(v, config)
- case Op386CMPBconst:
- return rewriteValue386_Op386CMPBconst(v, config)
- case Op386CMPL:
- return rewriteValue386_Op386CMPL(v, config)
- case Op386CMPLconst:
- return rewriteValue386_Op386CMPLconst(v, config)
- case Op386CMPW:
- return rewriteValue386_Op386CMPW(v, config)
- case Op386CMPWconst:
- return rewriteValue386_Op386CMPWconst(v, config)
case OpClosureCall:
return rewriteValue386_OpClosureCall(v, config)
case OpCom16:
@@ -186,16 +352,6 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_OpIsNonNil(v, config)
case OpIsSliceInBounds:
return rewriteValue386_OpIsSliceInBounds(v, config)
- case Op386LEAL:
- return rewriteValue386_Op386LEAL(v, config)
- case Op386LEAL1:
- return rewriteValue386_Op386LEAL1(v, config)
- case Op386LEAL2:
- return rewriteValue386_Op386LEAL2(v, config)
- case Op386LEAL4:
- return rewriteValue386_Op386LEAL4(v, config)
- case Op386LEAL8:
- return rewriteValue386_Op386LEAL8(v, config)
case OpLeq16:
return rewriteValue386_OpLeq16(v, config)
case OpLeq16U:
@@ -260,94 +416,6 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValue386_OpLsh8x8(v, config)
- case Op386MOVBLSX:
- return rewriteValue386_Op386MOVBLSX(v, config)
- case Op386MOVBLSXload:
- return rewriteValue386_Op386MOVBLSXload(v, config)
- case Op386MOVBLZX:
- return rewriteValue386_Op386MOVBLZX(v, config)
- case Op386MOVBload:
- return rewriteValue386_Op386MOVBload(v, config)
- case Op386MOVBloadidx1:
- return rewriteValue386_Op386MOVBloadidx1(v, config)
- case Op386MOVBstore:
- return rewriteValue386_Op386MOVBstore(v, config)
- case Op386MOVBstoreconst:
- return rewriteValue386_Op386MOVBstoreconst(v, config)
- case Op386MOVBstoreconstidx1:
- return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
- case Op386MOVBstoreidx1:
- return rewriteValue386_Op386MOVBstoreidx1(v, config)
- case Op386MOVLload:
- return rewriteValue386_Op386MOVLload(v, config)
- case Op386MOVLloadidx1:
- return rewriteValue386_Op386MOVLloadidx1(v, config)
- case Op386MOVLloadidx4:
- return rewriteValue386_Op386MOVLloadidx4(v, config)
- case Op386MOVLstore:
- return rewriteValue386_Op386MOVLstore(v, config)
- case Op386MOVLstoreconst:
- return rewriteValue386_Op386MOVLstoreconst(v, config)
- case Op386MOVLstoreconstidx1:
- return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
- case Op386MOVLstoreconstidx4:
- return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
- case Op386MOVLstoreidx1:
- return rewriteValue386_Op386MOVLstoreidx1(v, config)
- case Op386MOVLstoreidx4:
- return rewriteValue386_Op386MOVLstoreidx4(v, config)
- case Op386MOVSDload:
- return rewriteValue386_Op386MOVSDload(v, config)
- case Op386MOVSDloadidx1:
- return rewriteValue386_Op386MOVSDloadidx1(v, config)
- case Op386MOVSDloadidx8:
- return rewriteValue386_Op386MOVSDloadidx8(v, config)
- case Op386MOVSDstore:
- return rewriteValue386_Op386MOVSDstore(v, config)
- case Op386MOVSDstoreidx1:
- return rewriteValue386_Op386MOVSDstoreidx1(v, config)
- case Op386MOVSDstoreidx8:
- return rewriteValue386_Op386MOVSDstoreidx8(v, config)
- case Op386MOVSSload:
- return rewriteValue386_Op386MOVSSload(v, config)
- case Op386MOVSSloadidx1:
- return rewriteValue386_Op386MOVSSloadidx1(v, config)
- case Op386MOVSSloadidx4:
- return rewriteValue386_Op386MOVSSloadidx4(v, config)
- case Op386MOVSSstore:
- return rewriteValue386_Op386MOVSSstore(v, config)
- case Op386MOVSSstoreidx1:
- return rewriteValue386_Op386MOVSSstoreidx1(v, config)
- case Op386MOVSSstoreidx4:
- return rewriteValue386_Op386MOVSSstoreidx4(v, config)
- case Op386MOVWLSX:
- return rewriteValue386_Op386MOVWLSX(v, config)
- case Op386MOVWLSXload:
- return rewriteValue386_Op386MOVWLSXload(v, config)
- case Op386MOVWLZX:
- return rewriteValue386_Op386MOVWLZX(v, config)
- case Op386MOVWload:
- return rewriteValue386_Op386MOVWload(v, config)
- case Op386MOVWloadidx1:
- return rewriteValue386_Op386MOVWloadidx1(v, config)
- case Op386MOVWloadidx2:
- return rewriteValue386_Op386MOVWloadidx2(v, config)
- case Op386MOVWstore:
- return rewriteValue386_Op386MOVWstore(v, config)
- case Op386MOVWstoreconst:
- return rewriteValue386_Op386MOVWstoreconst(v, config)
- case Op386MOVWstoreconstidx1:
- return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
- case Op386MOVWstoreconstidx2:
- return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
- case Op386MOVWstoreidx1:
- return rewriteValue386_Op386MOVWstoreidx1(v, config)
- case Op386MOVWstoreidx2:
- return rewriteValue386_Op386MOVWstoreidx2(v, config)
- case Op386MULL:
- return rewriteValue386_Op386MULL(v, config)
- case Op386MULLconst:
- return rewriteValue386_Op386MULLconst(v, config)
case OpMod16:
return rewriteValue386_OpMod16(v, config)
case OpMod16u:
@@ -374,10 +442,6 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_OpMul64F(v, config)
case OpMul8:
return rewriteValue386_OpMul8(v, config)
- case Op386NEGL:
- return rewriteValue386_Op386NEGL(v, config)
- case Op386NOTL:
- return rewriteValue386_Op386NOTL(v, config)
case OpNeg16:
return rewriteValue386_OpNeg16(v, config)
case OpNeg32:
@@ -406,10 +470,6 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_OpNilCheck(v, config)
case OpNot:
return rewriteValue386_OpNot(v, config)
- case Op386ORL:
- return rewriteValue386_Op386ORL(v, config)
- case Op386ORLconst:
- return rewriteValue386_Op386ORLconst(v, config)
case OpOffPtr:
return rewriteValue386_OpOffPtr(v, config)
case OpOr16:
@@ -420,12 +480,6 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_OpOr8(v, config)
case OpOrB:
return rewriteValue386_OpOrB(v, config)
- case Op386ROLBconst:
- return rewriteValue386_Op386ROLBconst(v, config)
- case Op386ROLLconst:
- return rewriteValue386_Op386ROLLconst(v, config)
- case Op386ROLWconst:
- return rewriteValue386_Op386ROLWconst(v, config)
case OpRsh16Ux16:
return rewriteValue386_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
@@ -474,56 +528,6 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_OpRsh8x64(v, config)
case OpRsh8x8:
return rewriteValue386_OpRsh8x8(v, config)
- case Op386SARB:
- return rewriteValue386_Op386SARB(v, config)
- case Op386SARBconst:
- return rewriteValue386_Op386SARBconst(v, config)
- case Op386SARL:
- return rewriteValue386_Op386SARL(v, config)
- case Op386SARLconst:
- return rewriteValue386_Op386SARLconst(v, config)
- case Op386SARW:
- return rewriteValue386_Op386SARW(v, config)
- case Op386SARWconst:
- return rewriteValue386_Op386SARWconst(v, config)
- case Op386SBBL:
- return rewriteValue386_Op386SBBL(v, config)
- case Op386SBBLcarrymask:
- return rewriteValue386_Op386SBBLcarrymask(v, config)
- case Op386SETA:
- return rewriteValue386_Op386SETA(v, config)
- case Op386SETAE:
- return rewriteValue386_Op386SETAE(v, config)
- case Op386SETB:
- return rewriteValue386_Op386SETB(v, config)
- case Op386SETBE:
- return rewriteValue386_Op386SETBE(v, config)
- case Op386SETEQ:
- return rewriteValue386_Op386SETEQ(v, config)
- case Op386SETG:
- return rewriteValue386_Op386SETG(v, config)
- case Op386SETGE:
- return rewriteValue386_Op386SETGE(v, config)
- case Op386SETL:
- return rewriteValue386_Op386SETL(v, config)
- case Op386SETLE:
- return rewriteValue386_Op386SETLE(v, config)
- case Op386SETNE:
- return rewriteValue386_Op386SETNE(v, config)
- case Op386SHLL:
- return rewriteValue386_Op386SHLL(v, config)
- case Op386SHRB:
- return rewriteValue386_Op386SHRB(v, config)
- case Op386SHRL:
- return rewriteValue386_Op386SHRL(v, config)
- case Op386SHRW:
- return rewriteValue386_Op386SHRW(v, config)
- case Op386SUBL:
- return rewriteValue386_Op386SUBL(v, config)
- case Op386SUBLcarry:
- return rewriteValue386_Op386SUBLcarry(v, config)
- case Op386SUBLconst:
- return rewriteValue386_Op386SUBLconst(v, config)
case OpSignExt16to32:
return rewriteValue386_OpSignExt16to32(v, config)
case OpSignExt8to16:
@@ -560,10 +564,6 @@ func rewriteValue386(v *Value, config *Config) bool {
return rewriteValue386_OpTrunc32to16(v, config)
case OpTrunc32to8:
return rewriteValue386_OpTrunc32to8(v, config)
- case Op386XORL:
- return rewriteValue386_Op386XORL(v, config)
- case Op386XORLconst:
- return rewriteValue386_Op386XORLconst(v, config)
case OpXor16:
return rewriteValue386_OpXor16(v, config)
case OpXor32:
@@ -1181,216 +1181,6 @@ func rewriteValue386_Op386ANDLconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32F x y)
- // cond:
- // result: (ADDSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32carry x y)
- // cond:
- // result: (ADDLcarry x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDLcarry)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32withcarry x y c)
- // cond:
- // result: (ADCL x y c)
- for {
- x := v.Args[0]
- y := v.Args[1]
- c := v.Args[2]
- v.reset(Op386ADCL)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(c)
- return true
- }
-}
-func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64F x y)
- // cond:
- // result: (ADDSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add8 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AddPtr x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Addr {sym} base)
- // cond:
- // result: (LEAL {sym} base)
- for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(Op386LEAL)
- v.Aux = sym
- v.AddArg(base)
- return true
- }
-}
-func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And16 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And32 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And8 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AndB x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Bswap32 x)
- // cond:
- // result: (BSWAPL x)
- for {
- x := v.Args[0]
- v.reset(Op386BSWAPL)
- v.AddArg(x)
- return true
- }
-}
func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1405,8 +1195,8 @@ func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(Op386CMPBconst)
- v.AddArg(x)
v.AuxInt = int64(int8(c))
+ v.AddArg(x)
return true
}
// match: (CMPB (MOVLconst [c]) x)
@@ -1421,8 +1211,8 @@ func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int8(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1435,12 +1225,12 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)==int8(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) == int8(y)) {
break
}
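
The hunks in this file are mechanical fallout from the generator change: reads of v.AuxInt are hoisted above the v.Args traversal, and, further down, "if v.AuxInt != 0" guards move ahead of the Args unpacking. The rewrite rules themselves are unchanged; only the order of the emitted checks differs.
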
@@ -1451,12 +1241,12 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -1467,12 +1257,12 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -1483,12 +1273,12 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -1499,12 +1289,12 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -1515,12 +1305,12 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond: 0 <= int8(m) && int8(m) < int8(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int8(m) && int8(m) < int8(n)) {
break
}
@@ -1531,15 +1321,15 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTB x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTB)
v.AddArg(x)
v.AddArg(y)
@@ -1549,15 +1339,15 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTBconst [int64(int8(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTBconst)
v.AuxInt = int64(int8(c))
v.AddArg(x)
@@ -1567,10 +1357,10 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTB x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(Op386TESTB)
v.AddArg(x)
v.AddArg(x)
@@ -1592,8 +1382,8 @@ func rewriteValue386_Op386CMPL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(Op386CMPLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (CMPL (MOVLconst [c]) x)
@@ -1608,8 +1398,8 @@ func rewriteValue386_Op386CMPL(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1622,12 +1412,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) == int32(y)) {
break
}
@@ -1638,12 +1428,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -1654,12 +1444,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -1670,12 +1460,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -1686,12 +1476,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -1702,12 +1492,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386SHRLconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
break
}
@@ -1718,12 +1508,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond: 0 <= int32(m) && int32(m) < int32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int32(m) && int32(m) < int32(n)) {
break
}
@@ -1734,15 +1524,15 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTL x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTL)
v.AddArg(x)
v.AddArg(y)
@@ -1752,15 +1542,15 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTLconst [c] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTLconst)
v.AuxInt = c
v.AddArg(x)
@@ -1770,10 +1560,10 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTL x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(Op386TESTL)
v.AddArg(x)
v.AddArg(x)
@@ -1795,8 +1585,8 @@ func rewriteValue386_Op386CMPW(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(Op386CMPWconst)
- v.AddArg(x)
v.AuxInt = int64(int16(c))
+ v.AddArg(x)
return true
}
// match: (CMPW (MOVLconst [c]) x)
@@ -1811,8 +1601,8 @@ func rewriteValue386_Op386CMPW(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int16(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1825,12 +1615,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)==int16(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) == int16(y)) {
break
}
@@ -1841,12 +1631,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -1857,12 +1647,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -1873,12 +1663,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -1889,12 +1679,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -1905,12 +1695,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond: 0 <= int16(m) && int16(m) < int16(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int16(m) && int16(m) < int16(n)) {
break
}
@@ -1921,15 +1711,15 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTW x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTW)
v.AddArg(x)
v.AddArg(y)
@@ -1939,15 +1729,15 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTWconst [int64(int16(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTWconst)
v.AuxInt = int64(int16(c))
v.AddArg(x)
@@ -1957,10 +1747,10 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTW x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(Op386TESTW)
v.AddArg(x)
v.AddArg(x)
@@ -1968,979 +1758,6 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(Op386CALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com16 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(Op386NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com32 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(Op386NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com8 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(Op386NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const16 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32F [val])
- // cond:
- // result: (MOVSSconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVSSconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64F [val])
- // cond:
- // result: (MOVSDconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVSDconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const8 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstBool [b])
- // cond:
- // result: (MOVLconst [b])
- for {
- b := v.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = b
- return true
- }
-}
-func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstNil)
- // cond:
- // result: (MOVLconst [0])
- for {
- v.reset(Op386MOVLconst)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValue386_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Convert <t> x mem)
- // cond:
- // result: (MOVLconvert <t> x mem)
- for {
- t := v.Type
- x := v.Args[0]
- mem := v.Args[1]
- v.reset(Op386MOVLconvert)
- v.Type = t
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32 x)
- // cond:
- // result: (CVTTSS2SL x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTTSS2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64F x)
- // cond:
- // result: (CVTSS2SD x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTSS2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to32F x)
- // cond:
- // result: (CVTSL2SS x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTSL2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to64F x)
- // cond:
- // result: (CVTSL2SD x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTSL2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32 x)
- // cond:
- // result: (CVTTSD2SL x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTTSD2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32F x)
- // cond:
- // result: (CVTSD2SS x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTSD2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpDeferCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (DeferCall [argwid] mem)
- // cond:
- // result: (CALLdefer [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(Op386CALLdefer)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16 x y)
- // cond:
- // result: (DIVW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16u x y)
- // cond:
- // result: (DIVWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32 x y)
- // cond:
- // result: (DIVL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32F x y)
- // cond:
- // result: (DIVSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32u x y)
- // cond:
- // result: (DIVLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64F x y)
- // cond:
- // result: (DIVSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8 x y)
- // cond:
- // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVW)
- v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8u x y)
- // cond:
- // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVWU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpEq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq16 x y)
- // cond:
- // result: (SETEQ (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32 x y)
- // cond:
- // result: (SETEQ (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32F x y)
- // cond:
- // result: (SETEQF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64F x y)
- // cond:
- // result: (SETEQF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq8 x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqB x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqPtr x y)
- // cond:
- // result: (SETEQ (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16 x y)
- // cond:
- // result: (SETGE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16U x y)
- // cond:
- // result: (SETAE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32 x y)
- // cond:
- // result: (SETGE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32U x y)
- // cond:
- // result: (SETAE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8 x y)
- // cond:
- // result: (SETGE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8U x y)
- // cond:
- // result: (SETAE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
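
All of the comparison lowerings above share one shape: a CMP (CMPB/CMPW/CMPL or UCOMISS/UCOMISD) sets the x86 flags once, and a SETcc materializes the condition; signed and unsigned variants differ only in the condition code (SETGE vs SETAE, SETG vs SETA). A minimal standalone Go sketch of why the signed/unsigned split matters for the very same bit pattern:

    package main

    import "fmt"

    func main() {
        // The same 16-bit pattern orders differently when read signed vs
        // unsigned, which is why (Geq16 ...) lowers to SETGE but
        // (Geq16U ...) lowers to SETAE over the flags from one CMPW.
        a, b := uint16(0x8000), uint16(0x0001)
        fmt.Println(int16(a) >= int16(b)) // false: 0x8000 is -32768 signed
        fmt.Println(a >= b)               // true:  0x8000 is 32768 unsigned
    }
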
-func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetClosurePtr)
- // cond:
- // result: (LoweredGetClosurePtr)
- for {
- v.reset(Op386LoweredGetClosurePtr)
- return true
- }
-}
-func rewriteValue386_OpGetG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetG mem)
- // cond:
- // result: (LoweredGetG mem)
- for {
- mem := v.Args[0]
- v.reset(Op386LoweredGetG)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpGoCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GoCall [argwid] mem)
- // cond:
- // result: (CALLgo [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(Op386CALLgo)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16 x y)
- // cond:
- // result: (SETG (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETG)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16U x y)
- // cond:
- // result: (SETA (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETA)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32 x y)
- // cond:
- // result: (SETG (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETG)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32F x y)
- // cond:
- // result: (SETGF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32U x y)
- // cond:
- // result: (SETA (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETA)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64F x y)
- // cond:
- // result: (SETGF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8 x y)
- // cond:
- // result: (SETG (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETG)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8U x y)
- // cond:
- // result: (SETA (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETA)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpHmul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16 x y)
- // cond:
- // result: (HMULW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
- // cond:
- // result: (HMULWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
- // cond:
- // result: (HMULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
- // cond:
- // result: (HMULLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
- // cond:
- // result: (HMULB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
- // cond:
- // result: (HMULBU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULBU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (InterCall [argwid] entry mem)
- // cond:
- // result: (CALLinter [argwid] entry mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(Op386CALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
- // cond:
- // result: (SETB (CMPL idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(Op386SETB)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil p)
- // cond:
- // result: (SETNE (TESTL p p))
- for {
- p := v.Args[0]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386TESTL, TypeFlags)
- v0.AddArg(p)
- v0.AddArg(p)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
- // cond:
- // result: (SETBE (CMPL idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValue386_Op386LEAL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -3629,742 +2446,6 @@ func rewriteValue386_Op386LEAL8(v *Value, config *Config) bool {
}
return false
}
-func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16 x y)
- // cond:
- // result: (SETLE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16U x y)
- // cond:
- // result: (SETBE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32 x y)
- // cond:
- // result: (SETLE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32U x y)
- // cond:
- // result: (SETBE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8 x y)
- // cond:
- // result: (SETLE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8U x y)
- // cond:
- // result: (SETBE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16 x y)
- // cond:
- // result: (SETL (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETL)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16U x y)
- // cond:
- // result: (SETB (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETB)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32 x y)
- // cond:
- // result: (SETL (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETL)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32F x y)
- // cond:
- // result: (SETGF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32U x y)
- // cond:
- // result: (SETB (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETB)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64F x y)
- // cond:
- // result: (SETGF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8 x y)
- // cond:
- // result: (SETL (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETL)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8U x y)
- // cond:
- // result: (SETB (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETB)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Load <t> ptr mem)
- // cond: (is32BitInt(t) || isPtr(t))
- // result: (MOVLload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(Op386MOVLload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is16BitInt(t)
- // result: (MOVWload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(Op386MOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (t.IsBoolean() || is8BitInt(t))
- // result: (MOVBload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean() || is8BitInt(t)) {
- break
- }
- v.reset(Op386MOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVSSload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
- break
- }
- v.reset(Op386MOVSSload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVSDload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
- break
- }
- v.reset(Op386MOVSDload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
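
The Load lowering above is a pure dispatch on the loaded type: 32-bit ints and pointers take MOVLload, 16-bit ints MOVWload, booleans and 8-bit ints MOVBload, and floats the SSE loads. A hedged Go sketch of the same ladder, using illustrative stand-in parameters rather than the compiler's own type predicates:

    package main

    import "fmt"

    // loadOp mirrors the dispatch in rewriteValue386_OpLoad above. The
    // size/isFloat/isBool parameters are stand-ins for is32BitInt, isPtr,
    // is16BitInt, etc.; only the opcode names come from the rules.
    func loadOp(size int, isFloat, isBool bool) string {
        switch {
        case isFloat && size == 8:
            return "MOVSDload"
        case isFloat && size == 4:
            return "MOVSSload"
        case size == 4: // 32-bit ints and pointers
            return "MOVLload"
        case size == 2:
            return "MOVWload"
        default: // booleans and 8-bit ints
            return "MOVBload"
        }
    }

    func main() {
        fmt.Println(loadOp(4, false, false)) // MOVLload
        fmt.Println(loadOp(8, true, false))  // MOVSDload
    }
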
-func rewriteValue386_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
- // cond:
- // result: (ROLWconst <t> [c&15] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(Op386ROLWconst)
- v.Type = t
- v.AuxInt = c & 15
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 <t> x [c])
- // cond:
- // result: (ROLLconst <t> [c&31] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(Op386ROLLconst)
- v.Type = t
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
- // cond:
- // result: (ROLBconst <t> [c&7] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(Op386ROLBconst)
- v.Type = t
- v.AuxInt = c & 7
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SHLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(Op386SHLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh16x64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SHLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(Op386SHLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh32x64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SHLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(Op386SHLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh8x64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
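
The ANDL/SBBLcarrymask pattern repeated through the Lsh lowerings above exists because Go and x86 disagree about over-wide shifts: Go defines a variable shift by a count >= the operand width as 0, while SHLL masks its count mod 32. SBBLcarrymask over CMPxconst y [32] yields an all-ones mask when y < 32 and zero otherwise, and the ANDL applies it to the raw SHLL result. A runnable Go illustration of the semantics being preserved:

    package main

    import "fmt"

    func main() {
        // Go's variable shifts are defined as 0 once the count reaches the
        // operand width; the SBBLcarrymask/ANDL combination reproduces
        // that on top of x86's count-mod-32 SHLL.
        var x uint32 = 1
        for _, y := range []uint{1, 31, 32, 40} {
            fmt.Println(y, x<<y) // 1 2; 31 2147483648; 32 0; 40 0
        }
    }
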
func rewriteValue386_Op386MOVBLSX(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -8924,487 +7005,6 @@ func rewriteValue386_Op386MULLconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValue386_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16 x y)
- // cond:
- // result: (MODW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16u x y)
- // cond:
- // result: (MODWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32 x y)
- // cond:
- // result: (MODL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32u x y)
- // cond:
- // result: (MODLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMod8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8 x y)
- // cond:
- // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODW)
- v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8u x y)
- // cond:
- // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODWU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpMove(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Move [s] _ _ mem)
- // cond: SizeAndAlign(s).Size() == 0
- // result: mem
- for {
- s := v.AuxInt
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 1
- // result: (MOVBstore dst (MOVBload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 1) {
- break
- }
- v.reset(Op386MOVBstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2
- // result: (MOVWstore dst (MOVWload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2) {
- break
- }
- v.reset(Op386MOVWstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4
- // result: (MOVLstore dst (MOVLload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4) {
- break
- }
- v.reset(Op386MOVLstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 3
- // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 3) {
- break
- }
- v.reset(Op386MOVBstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVWstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 5
- // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 5) {
- break
- }
- v.reset(Op386MOVBstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 6
- // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 6) {
- break
- }
- v.reset(Op386MOVWstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 7
- // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 7) {
- break
- }
- v.reset(Op386MOVLstore)
- v.AuxInt = 3
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 8
- // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 8) {
- break
- }
- v.reset(Op386MOVLstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0
- // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4]) (ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4]) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0) {
- break
- }
- v.reset(OpMove)
- v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
- v0 := b.NewValue0(v.Line, Op386ADDLconst, dst.Type)
- v0.AddArg(dst)
- v0.AuxInt = SizeAndAlign(s).Size() % 4
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386ADDLconst, src.Type)
- v1.AddArg(src)
- v1.AuxInt = SizeAndAlign(s).Size() % 4
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v2.AddArg(dst)
- v3 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice
- // result: (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
- break
- }
- v.reset(Op386DUFFCOPY)
- v.AuxInt = 10 * (128 - SizeAndAlign(s).Size()/4)
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0
- // result: (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !((SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0) {
- break
- }
- v.reset(Op386REPMOVSL)
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
- v0.AuxInt = SizeAndAlign(s).Size() / 4
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- return false
-}
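
The Move lowering above is a size ladder: size 0 reuses the incoming memory, sizes 1-8 become one or two load/store pairs (with overlap for 3, 5, 6, 7), an unaligned remainder is peeled off with ADDLconst and the rule recurses, mid-size 4-byte-aligned copies use Duff's device, and everything else falls back to REP MOVSL. A hedged Go sketch of just the decision, with descriptive return strings rather than the compiler's opcodes:

    package main

    import "fmt"

    // moveStrategy sketches the size ladder in rewriteValue386_OpMove
    // above; the strings are descriptions, not real opcode names.
    func moveStrategy(size int64, noDuffDevice bool) string {
        switch {
        case size == 0:
            return "no-op: reuse mem"
        case size <= 8:
            return "one or two load/store pairs"
        case size%4 != 0:
            return "peel size%4 bytes, recurse on the aligned rest"
        case size <= 4*128 && !noDuffDevice:
            return "DUFFCOPY"
        default:
            return "REPMOVSL"
        }
    }

    func main() {
        fmt.Println(moveStrategy(7, false))    // one or two load/store pairs
        fmt.Println(moveStrategy(100, false))  // DUFFCOPY
        fmt.Println(moveStrategy(4096, false)) // REPMOVSL
    }
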
-func rewriteValue386_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul16 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32F x y)
- // cond:
- // result: (MULSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32uhilo x y)
- // cond:
- // result: (MULLQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULLQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64F x y)
- // cond:
- // result: (MULSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul8 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
func rewriteValue386_Op386NEGL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -9441,225 +7041,6 @@ func rewriteValue386_Op386NOTL(v *Value, config *Config) bool {
}
return false
}
-func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg16 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(Op386NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(Op386NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32F x)
- // cond:
- // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(Op386PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386MOVSSconst, config.Frontend().TypeFloat32())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64F x)
- // cond:
- // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(Op386PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386MOVSDconst, config.Frontend().TypeFloat64())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg8 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(Op386NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq16 x y)
- // cond:
- // result: (SETNE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32 x y)
- // cond:
- // result: (SETNE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32F x y)
- // cond:
- // result: (SETNEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64F x y)
- // cond:
- // result: (SETNEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq8 x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqB x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqPtr x y)
- // cond:
- // result: (SETNE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NilCheck ptr mem)
- // cond:
- // result: (LoweredNilCheck ptr mem)
- for {
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(Op386LoweredNilCheck)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Not x)
- // cond:
- // result: (XORLconst [1] x)
- for {
- x := v.Args[0]
- v.reset(Op386XORLconst)
- v.AuxInt = 1
- v.AddArg(x)
- return true
- }
-}
func rewriteValue386_Op386ORL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -10069,81 +7450,6 @@ func rewriteValue386_Op386ORLconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OffPtr [off] ptr)
- // cond:
- // result: (ADDLconst [off] ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(Op386ADDLconst)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
-}
-func rewriteValue386_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or16 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or32 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or8 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OrB x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
func rewriteValue386_Op386ROLBconst(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -10246,705 +7552,6 @@ func rewriteValue386_Op386ROLWconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SHRWconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(Op386SHRWconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh16Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x16 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x32 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SARWconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(Op386SARWconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (SARWconst x [15])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(Op386SARWconst)
- v.AddArg(x)
- v.AuxInt = 15
- return true
- }
- return false
-}
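
Note the asymmetry with the unsigned case: Rsh16Ux64 with an over-wide constant count becomes Const16 [0], but the signed Rsh16x64 above clamps the count to 15 (SARWconst [15]) instead, because an arithmetic shift keeps filling with the sign bit. A small runnable Go demonstration of that difference:

    package main

    import "fmt"

    func main() {
        // Over-wide right shifts: unsigned values go to 0, signed values
        // saturate to all sign bits, hence SARWconst [15] rather than a
        // zero constant.
        var s int16 = -8
        var u uint16 = 0xfff8
        n := uint(100)
        fmt.Println(s >> n) // -1: sign bits fill in
        fmt.Println(u >> n) // 0
    }
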
-func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x8 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SHRLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(Op386SHRLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh32Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x16 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x32 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SARLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(Op386SARLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (SARLconst x [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(Op386SARLconst)
- v.AddArg(x)
- v.AuxInt = 31
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x8 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SHRBconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(Op386SHRBconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh8Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x16 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x32 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SARBconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(Op386SARBconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (SARBconst x [7])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(Op386SARBconst)
- v.AddArg(x)
- v.AuxInt = 7
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x8 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValue386_Op386SARB(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12182,8 +8789,8 @@ func rewriteValue386_Op386SUBL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(Op386SUBLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (SUBL (MOVLconst [c]) x)
@@ -12198,8 +8805,8 @@ func rewriteValue386_Op386SUBL(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(Op386NEGL)
v0 := b.NewValue0(v.Line, Op386SUBLconst, v.Type)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12266,6 +8873,3495 @@ func rewriteValue386_Op386SUBLconst(v *Value, config *Config) bool {
return true
}
}
+func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORL x (MOVLconst [c]))
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL (MOVLconst [c]) x)
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL x x)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
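
The XORL rules and the XORLconst folds above rest on three identities of 32-bit XOR: it is commutative (so the constant may sit on either side), x^x is 0, and it is associative (so two stacked constants merge into one). A standalone sketch, not part of the patch:

package main

import "fmt"

func main() {
	var x, c, d int32 = 0x1234, 0x00ff, 0x0f0f
	fmt.Println(x^c == c^x)         // commutativity: both (XORL x (MOVLconst)) orders become the same XORLconst
	fmt.Println(x^x == 0)           // (XORL x x) -> (MOVLconst [0])
	fmt.Println(c^(d^x) == (c^d)^x) // associativity: nested XORLconst fold to [c ^ d]
}
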
+func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32carry x y)
+ // cond:
+ // result: (ADDLcarry x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDLcarry)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32withcarry x y c)
+ // cond:
+ // result: (ADCL x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(Op386ADCL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (LEAL {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(Op386LEAL)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap32 x)
+ // cond:
+ // result: (BSWAPL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386BSWAPL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVSSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVSSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVSDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVSDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVLconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValue386_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert <t> x mem)
+ // cond:
+ // result: (MOVLconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386MOVLconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (CVTTSS2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTTSS2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (CVTSS2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSS2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (CVTSL2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSL2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (CVTSL2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSL2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (CVTTSD2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTTSD2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (CVTSD2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSD2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(Op386CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIVW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (DIVWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIVL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (DIVLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
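
The Div8 rule above widens both operands to 16 bits and divides with DIVW; sign extension preserves the signed quotient, and the Div8u rule that follows does the same with zero extension. A minimal check of that equivalence (illustration only, not part of the patch):

package main

import "fmt"

func main() {
	var x, y int8 = -100, 7
	q := int8(int16(x) / int16(y)) // DIVW on sign-extended operands
	fmt.Println(q == x/y)          // true: widening preserves the 8-bit quotient
}
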
+func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVWU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (SETGE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (SETAE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (SETGE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (SETAE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (SETGE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (SETAE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(Op386LoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValue386_OpGetG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetG mem)
+ // cond:
+ // result: (LoweredGetG mem)
+ for {
+ mem := v.Args[0]
+ v.reset(Op386LoweredGetG)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(Op386CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (SETG (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (SETA (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (SETG (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (SETA (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (SETG (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (SETA (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (HMULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (HMULWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (HMULB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (HMULBU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULBU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (SETB (CMPL idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil p)
+ // cond:
+ // result: (SETNE (TESTL p p))
+ for {
+ p := v.Args[0]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386TESTL, TypeFlags)
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (SETBE (CMPL idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (SETL (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (SETB (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (SETL (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (SETB (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (SETL (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (SETB (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (ROLWconst <t> [c&15] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLWconst)
+ v.Type = t
+ v.AuxInt = c & 15
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 <t> x [c])
+ // cond:
+ // result: (ROLLconst <t> [c&31] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLLconst)
+ v.Type = t
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (ROLBconst <t> [c&7] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLBconst)
+ v.Type = t
+ v.AuxInt = c & 7
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
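
The ANDL/SBBLcarrymask pattern above (and in the other variable-count shift rules below) implements Go's shift semantics without a branch: CMPWconst compares the count against 32 (the SHLL width, not 16), SBBLcarrymask turns the resulting carry into all ones (count < 32) or all zeros (count >= 32), and the AND zeroes the result exactly when the hardware shift, which reduces its count mod 32, would otherwise produce garbage. A standalone sketch with an illustrative name, not part of the patch:

package main

import "fmt"

// lsh16 mirrors (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))).
func lsh16(x uint16, y uint16) uint16 {
	var mask uint32 // SBBLcarrymask: all ones iff y < 32
	if uint32(y) < 32 {
		mask = 0xFFFFFFFF
	}
	return uint16((uint32(x) << (y & 31)) & mask) // SHLL reduces its count mod 32
}

func main() {
	fmt.Println(lsh16(0x00ff, 4) == 0x0ff0) // in-range count shifts normally
	fmt.Println(lsh16(0x00ff, 40) == 0)     // count >= 32: the mask forces 0
}
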
+func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
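
With a constant 64-bit count, the mask machinery of the preceding rules is unnecessary: in-range counts lower to a single SHLLconst, and counts of 16 or more shift every bit of a 16-bit value out, so the rule folds straight to (Const16 [0]). For example:

package main

import "fmt"

func main() {
	var x uint16 = 0x1234
	fmt.Println(x<<3 == x*8) // c < 16: one SHLLconst
	fmt.Println(x<<16 == 0)  // c >= 16: all bits shifted out, folds to Const16 [0]
}
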
+func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MODW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (MODWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MODL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (MODLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODWU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4]) (ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4]) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
+ v0 := b.NewValue0(v.Line, Op386ADDLconst, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 4
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386ADDLconst, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 4
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(Op386DUFFCOPY)
+ v.AuxInt = 10 * (128 - SizeAndAlign(s).Size()/4)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0
+ // result: (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0) {
+ break
+ }
+ v.reset(Op386REPMOVSL)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
+ v0.AuxInt = SizeAndAlign(s).Size() / 4
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
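
The Move lowering above is driven purely by size: 1, 2, and 4 bytes become a single load/store pair; the odd sizes 3, 5, and 7 copy the tail with a second, overlapping pair (the size-7 rule stores 4 bytes at offsets 3 and 0, so byte 3 is written twice) rather than chaining narrower copies; larger sizes peel off the remainder mod 4 and recurse, then use Duff's device up to 4*128 bytes or REP MOVSL beyond that. A standalone sketch of the size-7 case with an illustrative name, not part of the patch:

package main

import (
	"encoding/binary"
	"fmt"
)

// move7 mirrors (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)):
// two 4-byte copies whose middle byte overlaps.
func move7(dst, src []byte) {
	hi := binary.LittleEndian.Uint32(src[3:7]) // MOVLload [3] src
	lo := binary.LittleEndian.Uint32(src[0:4]) // MOVLload src
	binary.LittleEndian.PutUint32(dst[3:7], hi)
	binary.LittleEndian.PutUint32(dst[0:4], lo)
}

func main() {
	src := []byte{1, 2, 3, 4, 5, 6, 7}
	dst := make([]byte, 7)
	move7(dst, src)
	fmt.Println(dst) // [1 2 3 4 5 6 7]
}
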
+func rewriteValue386_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32uhilo x y)
+ // cond:
+ // result: (MULLQU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULLQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ v.reset(Op386PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386MOVSSconst, config.Frontend().TypeFloat32())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ v.reset(Op386PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386MOVSDconst, config.Frontend().TypeFloat64())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
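
Neg32F and Neg64F above negate by flipping only the sign bit: PXOR with a constant whose bit pattern is that of -0.0, which is what f2i(math.Copysign(0, -1)) encodes. A standalone sketch of the 32-bit case, not part of the patch:

package main

import (
	"fmt"
	"math"
)

// neg32 mirrors (PXOR x (MOVSSconst [f2i(math.Copysign(0, -1))])).
func neg32(x float32) float32 {
	signBit := math.Float32bits(float32(math.Copysign(0, -1))) // 0x80000000
	return math.Float32frombits(math.Float32bits(x) ^ signBit)
}

func main() {
	fmt.Println(neg32(1.5), neg32(-2.25)) // -1.5 2.25
}
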
+func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORLconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDLconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValue386_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHRWconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
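+// With a Const64 count the signed case clamps instead of masking: counts
+// below 16 become SARWconst [c], while counts of 16 or more become
+// SARWconst [15], which already replicates the sign bit across the result.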
+func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SARWconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SARWconst x [15])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = 15
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
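+// The 32-bit and 8-bit right-shift lowerings below repeat the same
+// mask-or-clamp patterns at their respective widths.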
+func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHRLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHRLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SARLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SARLconst x [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHRBconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SARBconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SARBconst x [7])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = 7
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
func rewriteValue386_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12314,8 +12410,8 @@ func rewriteValue386_OpSignmask(v *Value, config *Config) bool {
for {
x := v.Args[0]
v.reset(Op386SARLconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
}
@@ -12604,102 +12700,6 @@ func rewriteValue386_OpTrunc32to8(v *Value, config *Config) bool {
return true
}
}
-func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORL x (MOVLconst [c]))
- // cond:
- // result: (XORLconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != Op386MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(Op386XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL (MOVLconst [c]) x)
- // cond:
- // result: (XORLconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != Op386MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(Op386XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL x x)
- // cond:
- // result: (MOVLconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(Op386MOVLconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORLconst [c] (XORLconst [d] x))
- // cond:
- // result: (XORLconst [c ^ d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != Op386XORLconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(Op386XORLconst)
- v.AuxInt = c ^ d
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] x)
- // cond: int32(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int32(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] (MOVLconst [d]))
- // cond:
- // result: (MOVLconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != Op386MOVLconst {
- break
- }
- d := v_0.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
func rewriteValue386_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12903,8 +12903,8 @@ func rewriteValue386_OpZero(v *Value, config *Config) bool {
v.reset(OpZero)
v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
v0 := b.NewValue0(v.Line, Op386ADDLconst, config.fe.TypeUInt32())
- v0.AddArg(destptr)
v0.AuxInt = SizeAndAlign(s).Size() % 4
+ v0.AddArg(destptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
v1.AuxInt = 0
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 154e1e9231..19f01a0148 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -24,6 +24,250 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpAMD64ANDQ(v, config)
case OpAMD64ANDQconst:
return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
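+ // Dispatch is keyed by the fully qualified opcode name, which sorts
+ // the machine-specific OpAMD64* cases ahead of the generic Op* cases.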
+ case OpAMD64CMOVLEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config)
+ case OpAMD64CMOVQEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVQEQconst(v, config)
+ case OpAMD64CMOVWEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVWEQconst(v, config)
+ case OpAMD64CMPB:
+ return rewriteValueAMD64_OpAMD64CMPB(v, config)
+ case OpAMD64CMPBconst:
+ return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
+ case OpAMD64CMPL:
+ return rewriteValueAMD64_OpAMD64CMPL(v, config)
+ case OpAMD64CMPLconst:
+ return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
+ case OpAMD64CMPQ:
+ return rewriteValueAMD64_OpAMD64CMPQ(v, config)
+ case OpAMD64CMPQconst:
+ return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
+ case OpAMD64CMPW:
+ return rewriteValueAMD64_OpAMD64CMPW(v, config)
+ case OpAMD64CMPWconst:
+ return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
+ case OpAMD64LEAQ:
+ return rewriteValueAMD64_OpAMD64LEAQ(v, config)
+ case OpAMD64LEAQ1:
+ return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
+ case OpAMD64LEAQ2:
+ return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
+ case OpAMD64LEAQ4:
+ return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
+ case OpAMD64LEAQ8:
+ return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
+ case OpAMD64MOVBQSX:
+ return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
+ case OpAMD64MOVBQSXload:
+ return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
+ case OpAMD64MOVBQZX:
+ return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
+ case OpAMD64MOVBload:
+ return rewriteValueAMD64_OpAMD64MOVBload(v, config)
+ case OpAMD64MOVBloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
+ case OpAMD64MOVBstore:
+ return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
+ case OpAMD64MOVBstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
+ case OpAMD64MOVBstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
+ case OpAMD64MOVBstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
+ case OpAMD64MOVLQSX:
+ return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
+ case OpAMD64MOVLQSXload:
+ return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
+ case OpAMD64MOVLQZX:
+ return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
+ case OpAMD64MOVLload:
+ return rewriteValueAMD64_OpAMD64MOVLload(v, config)
+ case OpAMD64MOVLloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
+ case OpAMD64MOVLloadidx4:
+ return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
+ case OpAMD64MOVLstore:
+ return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
+ case OpAMD64MOVLstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
+ case OpAMD64MOVLstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
+ case OpAMD64MOVLstoreconstidx4:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
+ case OpAMD64MOVLstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
+ case OpAMD64MOVLstoreidx4:
+ return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
+ case OpAMD64MOVOload:
+ return rewriteValueAMD64_OpAMD64MOVOload(v, config)
+ case OpAMD64MOVOstore:
+ return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
+ case OpAMD64MOVQload:
+ return rewriteValueAMD64_OpAMD64MOVQload(v, config)
+ case OpAMD64MOVQloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
+ case OpAMD64MOVQloadidx8:
+ return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
+ case OpAMD64MOVQstore:
+ return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
+ case OpAMD64MOVQstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
+ case OpAMD64MOVQstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
+ case OpAMD64MOVQstoreconstidx8:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
+ case OpAMD64MOVQstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
+ case OpAMD64MOVQstoreidx8:
+ return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
+ case OpAMD64MOVSDload:
+ return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
+ case OpAMD64MOVSDloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
+ case OpAMD64MOVSDloadidx8:
+ return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
+ case OpAMD64MOVSDstore:
+ return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
+ case OpAMD64MOVSDstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
+ case OpAMD64MOVSDstoreidx8:
+ return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
+ case OpAMD64MOVSSload:
+ return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
+ case OpAMD64MOVSSloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
+ case OpAMD64MOVSSloadidx4:
+ return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
+ case OpAMD64MOVSSstore:
+ return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
+ case OpAMD64MOVSSstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
+ case OpAMD64MOVSSstoreidx4:
+ return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
+ case OpAMD64MOVWQSX:
+ return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
+ case OpAMD64MOVWQSXload:
+ return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
+ case OpAMD64MOVWQZX:
+ return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
+ case OpAMD64MOVWload:
+ return rewriteValueAMD64_OpAMD64MOVWload(v, config)
+ case OpAMD64MOVWloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
+ case OpAMD64MOVWloadidx2:
+ return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
+ case OpAMD64MOVWstore:
+ return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
+ case OpAMD64MOVWstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
+ case OpAMD64MOVWstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
+ case OpAMD64MOVWstoreconstidx2:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
+ case OpAMD64MOVWstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
+ case OpAMD64MOVWstoreidx2:
+ return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
+ case OpAMD64MULL:
+ return rewriteValueAMD64_OpAMD64MULL(v, config)
+ case OpAMD64MULLconst:
+ return rewriteValueAMD64_OpAMD64MULLconst(v, config)
+ case OpAMD64MULQ:
+ return rewriteValueAMD64_OpAMD64MULQ(v, config)
+ case OpAMD64MULQconst:
+ return rewriteValueAMD64_OpAMD64MULQconst(v, config)
+ case OpAMD64NEGL:
+ return rewriteValueAMD64_OpAMD64NEGL(v, config)
+ case OpAMD64NEGQ:
+ return rewriteValueAMD64_OpAMD64NEGQ(v, config)
+ case OpAMD64NOTL:
+ return rewriteValueAMD64_OpAMD64NOTL(v, config)
+ case OpAMD64NOTQ:
+ return rewriteValueAMD64_OpAMD64NOTQ(v, config)
+ case OpAMD64ORL:
+ return rewriteValueAMD64_OpAMD64ORL(v, config)
+ case OpAMD64ORLconst:
+ return rewriteValueAMD64_OpAMD64ORLconst(v, config)
+ case OpAMD64ORQ:
+ return rewriteValueAMD64_OpAMD64ORQ(v, config)
+ case OpAMD64ORQconst:
+ return rewriteValueAMD64_OpAMD64ORQconst(v, config)
+ case OpAMD64ROLBconst:
+ return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
+ case OpAMD64ROLLconst:
+ return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
+ case OpAMD64ROLQconst:
+ return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
+ case OpAMD64ROLWconst:
+ return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
+ case OpAMD64SARB:
+ return rewriteValueAMD64_OpAMD64SARB(v, config)
+ case OpAMD64SARBconst:
+ return rewriteValueAMD64_OpAMD64SARBconst(v, config)
+ case OpAMD64SARL:
+ return rewriteValueAMD64_OpAMD64SARL(v, config)
+ case OpAMD64SARLconst:
+ return rewriteValueAMD64_OpAMD64SARLconst(v, config)
+ case OpAMD64SARQ:
+ return rewriteValueAMD64_OpAMD64SARQ(v, config)
+ case OpAMD64SARQconst:
+ return rewriteValueAMD64_OpAMD64SARQconst(v, config)
+ case OpAMD64SARW:
+ return rewriteValueAMD64_OpAMD64SARW(v, config)
+ case OpAMD64SARWconst:
+ return rewriteValueAMD64_OpAMD64SARWconst(v, config)
+ case OpAMD64SBBLcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
+ case OpAMD64SBBQcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
+ case OpAMD64SETA:
+ return rewriteValueAMD64_OpAMD64SETA(v, config)
+ case OpAMD64SETAE:
+ return rewriteValueAMD64_OpAMD64SETAE(v, config)
+ case OpAMD64SETB:
+ return rewriteValueAMD64_OpAMD64SETB(v, config)
+ case OpAMD64SETBE:
+ return rewriteValueAMD64_OpAMD64SETBE(v, config)
+ case OpAMD64SETEQ:
+ return rewriteValueAMD64_OpAMD64SETEQ(v, config)
+ case OpAMD64SETG:
+ return rewriteValueAMD64_OpAMD64SETG(v, config)
+ case OpAMD64SETGE:
+ return rewriteValueAMD64_OpAMD64SETGE(v, config)
+ case OpAMD64SETL:
+ return rewriteValueAMD64_OpAMD64SETL(v, config)
+ case OpAMD64SETLE:
+ return rewriteValueAMD64_OpAMD64SETLE(v, config)
+ case OpAMD64SETNE:
+ return rewriteValueAMD64_OpAMD64SETNE(v, config)
+ case OpAMD64SHLL:
+ return rewriteValueAMD64_OpAMD64SHLL(v, config)
+ case OpAMD64SHLQ:
+ return rewriteValueAMD64_OpAMD64SHLQ(v, config)
+ case OpAMD64SHRB:
+ return rewriteValueAMD64_OpAMD64SHRB(v, config)
+ case OpAMD64SHRL:
+ return rewriteValueAMD64_OpAMD64SHRL(v, config)
+ case OpAMD64SHRQ:
+ return rewriteValueAMD64_OpAMD64SHRQ(v, config)
+ case OpAMD64SHRW:
+ return rewriteValueAMD64_OpAMD64SHRW(v, config)
+ case OpAMD64SUBL:
+ return rewriteValueAMD64_OpAMD64SUBL(v, config)
+ case OpAMD64SUBLconst:
+ return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
+ case OpAMD64SUBQ:
+ return rewriteValueAMD64_OpAMD64SUBQ(v, config)
+ case OpAMD64SUBQconst:
+ return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
+ case OpAMD64XORL:
+ return rewriteValueAMD64_OpAMD64XORL(v, config)
+ case OpAMD64XORLconst:
+ return rewriteValueAMD64_OpAMD64XORLconst(v, config)
+ case OpAMD64XORQ:
+ return rewriteValueAMD64_OpAMD64XORQ(v, config)
+ case OpAMD64XORQconst:
+ return rewriteValueAMD64_OpAMD64XORQconst(v, config)
case OpAdd16:
return rewriteValueAMD64_OpAdd16(v, config)
case OpAdd32:
@@ -56,28 +300,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpBswap32(v, config)
case OpBswap64:
return rewriteValueAMD64_OpBswap64(v, config)
- case OpAMD64CMOVLEQconst:
- return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config)
- case OpAMD64CMOVQEQconst:
- return rewriteValueAMD64_OpAMD64CMOVQEQconst(v, config)
- case OpAMD64CMOVWEQconst:
- return rewriteValueAMD64_OpAMD64CMOVWEQconst(v, config)
- case OpAMD64CMPB:
- return rewriteValueAMD64_OpAMD64CMPB(v, config)
- case OpAMD64CMPBconst:
- return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
- case OpAMD64CMPL:
- return rewriteValueAMD64_OpAMD64CMPL(v, config)
- case OpAMD64CMPLconst:
- return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
- case OpAMD64CMPQ:
- return rewriteValueAMD64_OpAMD64CMPQ(v, config)
- case OpAMD64CMPQconst:
- return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
- case OpAMD64CMPW:
- return rewriteValueAMD64_OpAMD64CMPW(v, config)
- case OpAMD64CMPWconst:
- return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
case OpClosureCall:
return rewriteValueAMD64_OpClosureCall(v, config)
case OpCom16:
@@ -240,16 +462,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpIsNonNil(v, config)
case OpIsSliceInBounds:
return rewriteValueAMD64_OpIsSliceInBounds(v, config)
- case OpAMD64LEAQ:
- return rewriteValueAMD64_OpAMD64LEAQ(v, config)
- case OpAMD64LEAQ1:
- return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
- case OpAMD64LEAQ2:
- return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
- case OpAMD64LEAQ4:
- return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
- case OpAMD64LEAQ8:
- return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
case OpLeq16:
return rewriteValueAMD64_OpLeq16(v, config)
case OpLeq16U:
@@ -332,126 +544,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValueAMD64_OpLsh8x8(v, config)
- case OpAMD64MOVBQSX:
- return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
- case OpAMD64MOVBQSXload:
- return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
- case OpAMD64MOVBQZX:
- return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
- case OpAMD64MOVBload:
- return rewriteValueAMD64_OpAMD64MOVBload(v, config)
- case OpAMD64MOVBloadidx1:
- return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
- case OpAMD64MOVBstore:
- return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
- case OpAMD64MOVBstoreconst:
- return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
- case OpAMD64MOVBstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
- case OpAMD64MOVBstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
- case OpAMD64MOVLQSX:
- return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
- case OpAMD64MOVLQSXload:
- return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
- case OpAMD64MOVLQZX:
- return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
- case OpAMD64MOVLload:
- return rewriteValueAMD64_OpAMD64MOVLload(v, config)
- case OpAMD64MOVLloadidx1:
- return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
- case OpAMD64MOVLloadidx4:
- return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
- case OpAMD64MOVLstore:
- return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
- case OpAMD64MOVLstoreconst:
- return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
- case OpAMD64MOVLstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
- case OpAMD64MOVLstoreconstidx4:
- return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
- case OpAMD64MOVLstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
- case OpAMD64MOVLstoreidx4:
- return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
- case OpAMD64MOVOload:
- return rewriteValueAMD64_OpAMD64MOVOload(v, config)
- case OpAMD64MOVOstore:
- return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
- case OpAMD64MOVQload:
- return rewriteValueAMD64_OpAMD64MOVQload(v, config)
- case OpAMD64MOVQloadidx1:
- return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
- case OpAMD64MOVQloadidx8:
- return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
- case OpAMD64MOVQstore:
- return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
- case OpAMD64MOVQstoreconst:
- return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
- case OpAMD64MOVQstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
- case OpAMD64MOVQstoreconstidx8:
- return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
- case OpAMD64MOVQstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
- case OpAMD64MOVQstoreidx8:
- return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
- case OpAMD64MOVSDload:
- return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
- case OpAMD64MOVSDloadidx1:
- return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
- case OpAMD64MOVSDloadidx8:
- return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
- case OpAMD64MOVSDstore:
- return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
- case OpAMD64MOVSDstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
- case OpAMD64MOVSDstoreidx8:
- return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
- case OpAMD64MOVSSload:
- return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
- case OpAMD64MOVSSloadidx1:
- return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
- case OpAMD64MOVSSloadidx4:
- return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
- case OpAMD64MOVSSstore:
- return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
- case OpAMD64MOVSSstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
- case OpAMD64MOVSSstoreidx4:
- return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
- case OpAMD64MOVWQSX:
- return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
- case OpAMD64MOVWQSXload:
- return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
- case OpAMD64MOVWQZX:
- return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
- case OpAMD64MOVWload:
- return rewriteValueAMD64_OpAMD64MOVWload(v, config)
- case OpAMD64MOVWloadidx1:
- return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
- case OpAMD64MOVWloadidx2:
- return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
- case OpAMD64MOVWstore:
- return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
- case OpAMD64MOVWstoreconst:
- return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
- case OpAMD64MOVWstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
- case OpAMD64MOVWstoreconstidx2:
- return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
- case OpAMD64MOVWstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
- case OpAMD64MOVWstoreidx2:
- return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
- case OpAMD64MULL:
- return rewriteValueAMD64_OpAMD64MULL(v, config)
- case OpAMD64MULLconst:
- return rewriteValueAMD64_OpAMD64MULLconst(v, config)
- case OpAMD64MULQ:
- return rewriteValueAMD64_OpAMD64MULQ(v, config)
- case OpAMD64MULQconst:
- return rewriteValueAMD64_OpAMD64MULQconst(v, config)
case OpMod16:
return rewriteValueAMD64_OpMod16(v, config)
case OpMod16u:
@@ -482,14 +574,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpMul64F(v, config)
case OpMul8:
return rewriteValueAMD64_OpMul8(v, config)
- case OpAMD64NEGL:
- return rewriteValueAMD64_OpAMD64NEGL(v, config)
- case OpAMD64NEGQ:
- return rewriteValueAMD64_OpAMD64NEGQ(v, config)
- case OpAMD64NOTL:
- return rewriteValueAMD64_OpAMD64NOTL(v, config)
- case OpAMD64NOTQ:
- return rewriteValueAMD64_OpAMD64NOTQ(v, config)
case OpNeg16:
return rewriteValueAMD64_OpNeg16(v, config)
case OpNeg32:
@@ -522,14 +606,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpNilCheck(v, config)
case OpNot:
return rewriteValueAMD64_OpNot(v, config)
- case OpAMD64ORL:
- return rewriteValueAMD64_OpAMD64ORL(v, config)
- case OpAMD64ORLconst:
- return rewriteValueAMD64_OpAMD64ORLconst(v, config)
- case OpAMD64ORQ:
- return rewriteValueAMD64_OpAMD64ORQ(v, config)
- case OpAMD64ORQconst:
- return rewriteValueAMD64_OpAMD64ORQconst(v, config)
case OpOffPtr:
return rewriteValueAMD64_OpOffPtr(v, config)
case OpOr16:
@@ -542,14 +618,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpOr8(v, config)
case OpOrB:
return rewriteValueAMD64_OpOrB(v, config)
- case OpAMD64ROLBconst:
- return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
- case OpAMD64ROLLconst:
- return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
- case OpAMD64ROLQconst:
- return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
- case OpAMD64ROLWconst:
- return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
case OpRsh16Ux16:
return rewriteValueAMD64_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
@@ -614,66 +682,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpRsh8x64(v, config)
case OpRsh8x8:
return rewriteValueAMD64_OpRsh8x8(v, config)
- case OpAMD64SARB:
- return rewriteValueAMD64_OpAMD64SARB(v, config)
- case OpAMD64SARBconst:
- return rewriteValueAMD64_OpAMD64SARBconst(v, config)
- case OpAMD64SARL:
- return rewriteValueAMD64_OpAMD64SARL(v, config)
- case OpAMD64SARLconst:
- return rewriteValueAMD64_OpAMD64SARLconst(v, config)
- case OpAMD64SARQ:
- return rewriteValueAMD64_OpAMD64SARQ(v, config)
- case OpAMD64SARQconst:
- return rewriteValueAMD64_OpAMD64SARQconst(v, config)
- case OpAMD64SARW:
- return rewriteValueAMD64_OpAMD64SARW(v, config)
- case OpAMD64SARWconst:
- return rewriteValueAMD64_OpAMD64SARWconst(v, config)
- case OpAMD64SBBLcarrymask:
- return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
- case OpAMD64SBBQcarrymask:
- return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
- case OpAMD64SETA:
- return rewriteValueAMD64_OpAMD64SETA(v, config)
- case OpAMD64SETAE:
- return rewriteValueAMD64_OpAMD64SETAE(v, config)
- case OpAMD64SETB:
- return rewriteValueAMD64_OpAMD64SETB(v, config)
- case OpAMD64SETBE:
- return rewriteValueAMD64_OpAMD64SETBE(v, config)
- case OpAMD64SETEQ:
- return rewriteValueAMD64_OpAMD64SETEQ(v, config)
- case OpAMD64SETG:
- return rewriteValueAMD64_OpAMD64SETG(v, config)
- case OpAMD64SETGE:
- return rewriteValueAMD64_OpAMD64SETGE(v, config)
- case OpAMD64SETL:
- return rewriteValueAMD64_OpAMD64SETL(v, config)
- case OpAMD64SETLE:
- return rewriteValueAMD64_OpAMD64SETLE(v, config)
- case OpAMD64SETNE:
- return rewriteValueAMD64_OpAMD64SETNE(v, config)
- case OpAMD64SHLL:
- return rewriteValueAMD64_OpAMD64SHLL(v, config)
- case OpAMD64SHLQ:
- return rewriteValueAMD64_OpAMD64SHLQ(v, config)
- case OpAMD64SHRB:
- return rewriteValueAMD64_OpAMD64SHRB(v, config)
- case OpAMD64SHRL:
- return rewriteValueAMD64_OpAMD64SHRL(v, config)
- case OpAMD64SHRQ:
- return rewriteValueAMD64_OpAMD64SHRQ(v, config)
- case OpAMD64SHRW:
- return rewriteValueAMD64_OpAMD64SHRW(v, config)
- case OpAMD64SUBL:
- return rewriteValueAMD64_OpAMD64SUBL(v, config)
- case OpAMD64SUBLconst:
- return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
- case OpAMD64SUBQ:
- return rewriteValueAMD64_OpAMD64SUBQ(v, config)
- case OpAMD64SUBQconst:
- return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
case OpSignExt16to32:
return rewriteValueAMD64_OpSignExt16to32(v, config)
case OpSignExt16to64:
@@ -718,14 +726,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpTrunc64to32(v, config)
case OpTrunc64to8:
return rewriteValueAMD64_OpTrunc64to8(v, config)
- case OpAMD64XORL:
- return rewriteValueAMD64_OpAMD64XORL(v, config)
- case OpAMD64XORLconst:
- return rewriteValueAMD64_OpAMD64XORLconst(v, config)
- case OpAMD64XORQ:
- return rewriteValueAMD64_OpAMD64XORQ(v, config)
- case OpAMD64XORQconst:
- return rewriteValueAMD64_OpAMD64XORQconst(v, config)
case OpXor16:
return rewriteValueAMD64_OpXor16(v, config)
case OpXor32:
@@ -1531,242 +1531,6 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32F x y)
- // cond:
- // result: (ADDSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64 x y)
- // cond:
- // result: (ADDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64F x y)
- // cond:
- // result: (ADDSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add8 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AddPtr x y)
- // cond:
- // result: (ADDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Addr {sym} base)
- // cond:
- // result: (LEAQ {sym} base)
- for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(OpAMD64LEAQ)
- v.Aux = sym
- v.AddArg(base)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And16 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And32 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And64 x y)
- // cond:
- // result: (ANDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And8 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AndB x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Avg64u x y)
- // cond:
- // result: (AVGQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64AVGQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Bswap32 x)
- // cond:
- // result: (BSWAPL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64BSWAPL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Bswap64 x)
- // cond:
- // result: (BSWAPQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64BSWAPQ)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1774,28 +1538,28 @@ func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVLNEconst x y [c])
for {
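+ // c is the constant carried in AuxInt; the generated matchers read
+ // AuxInt before inspecting the argument list.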
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVLNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVLEQconst _ (FlagEQ) [c])
// cond:
// result: (Const32 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst32)
v.AuxInt = c
return true
@@ -1865,28 +1629,28 @@ func rewriteValueAMD64_OpAMD64CMOVQEQconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVQNEconst x y [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVQNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVQEQconst _ (FlagEQ) [c])
// cond:
// result: (Const64 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst64)
v.AuxInt = c
return true
@@ -1956,28 +1720,28 @@ func rewriteValueAMD64_OpAMD64CMOVWEQconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVWNEconst x y [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVWNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVWEQconst _ (FlagEQ) [c])
// cond:
// result: (Const16 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst16)
v.AuxInt = c
return true
@@ -2054,8 +1818,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpAMD64CMPBconst)
- v.AddArg(x)
v.AuxInt = int64(int8(c))
+ v.AddArg(x)
return true
}
// match: (CMPB (MOVLconst [c]) x)
@@ -2070,8 +1834,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int8(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2084,12 +1848,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)==int8(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) == int8(y)) {
break
}
@@ -2100,12 +1864,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -2116,12 +1880,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -2132,12 +1896,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -2148,12 +1912,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -2164,12 +1928,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: 0 <= int8(m) && int8(m) < int8(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int8(m) && int8(m) < int8(n)) {
break
}
@@ -2180,15 +1944,15 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTB x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTB)
v.AddArg(x)
v.AddArg(y)
@@ -2198,15 +1962,15 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTBconst [int64(int8(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTBconst)
v.AuxInt = int64(int8(c))
v.AddArg(x)
@@ -2216,10 +1980,10 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTB x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTB)
v.AddArg(x)
v.AddArg(x)
@@ -2241,8 +2005,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpAMD64CMPLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (CMPL (MOVLconst [c]) x)
@@ -2257,8 +2021,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2271,12 +2035,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) == int32(y)) {
break
}
@@ -2287,12 +2051,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -2303,12 +2067,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -2319,12 +2083,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -2335,12 +2099,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -2351,12 +2115,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SHRLconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
break
}
@@ -2367,12 +2131,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: 0 <= int32(m) && int32(m) < int32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int32(m) && int32(m) < int32(n)) {
break
}
@@ -2383,15 +2147,15 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTL x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTL)
v.AddArg(x)
v.AddArg(y)
@@ -2401,15 +2165,15 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTLconst [c] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTLconst)
v.AuxInt = c
v.AddArg(x)
@@ -2419,10 +2183,10 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTL x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTL)
v.AddArg(x)
v.AddArg(x)
@@ -2447,8 +2211,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
break
}
v.reset(OpAMD64CMPQconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (CMPQ (MOVQconst [c]) x)
@@ -2466,8 +2230,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2480,12 +2244,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x==y
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x == y) {
break
}
@@ -2496,12 +2260,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x<y && uint64(x)<uint64(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x < y && uint64(x) < uint64(y)) {
break
}
@@ -2512,12 +2276,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x<y && uint64(x)>uint64(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x < y && uint64(x) > uint64(y)) {
break
}
@@ -2528,12 +2292,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x>y && uint64(x)<uint64(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x > y && uint64(x) < uint64(y)) {
break
}
@@ -2544,12 +2308,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x>y && uint64(x)>uint64(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x > y && uint64(x) > uint64(y)) {
break
}
@@ -2560,11 +2324,11 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0xFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVBQZX {
break
}
- c := v.AuxInt
if !(0xFF < c) {
break
}
@@ -2575,11 +2339,11 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0xFFFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVWQZX {
break
}
- c := v.AuxInt
if !(0xFFFF < c) {
break
}
@@ -2590,11 +2354,11 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0xFFFFFFFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLQZX {
break
}
- c := v.AuxInt
if !(0xFFFFFFFF < c) {
break
}
@@ -2605,12 +2369,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SHRQconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
break
}
@@ -2621,12 +2385,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= m && m < n) {
break
}
@@ -2637,15 +2401,15 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond:
// result: (TESTQ x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQ {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTQ)
v.AddArg(x)
v.AddArg(y)
@@ -2655,15 +2419,15 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond:
// result: (TESTQconst [c] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTQconst)
v.AuxInt = c
v.AddArg(x)
@@ -2673,10 +2437,10 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond:
// result: (TESTQ x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTQ)
v.AddArg(x)
v.AddArg(x)
@@ -2698,8 +2462,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpAMD64CMPWconst)
- v.AddArg(x)
v.AuxInt = int64(int16(c))
+ v.AddArg(x)
return true
}
// match: (CMPW (MOVLconst [c]) x)
@@ -2714,8 +2478,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int16(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2728,12 +2492,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)==int16(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) == int16(y)) {
break
}
@@ -2744,12 +2508,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -2760,12 +2524,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -2776,12 +2540,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -2792,12 +2556,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -2808,12 +2572,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: 0 <= int16(m) && int16(m) < int16(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int16(m) && int16(m) < int16(n)) {
break
}
@@ -2824,15 +2588,15 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTW x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTW)
v.AddArg(x)
v.AddArg(y)
@@ -2842,15 +2606,15 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTWconst [int64(int16(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTWconst)
v.AuxInt = int64(int16(c))
v.AddArg(x)
@@ -2860,10 +2624,10 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTW x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTW)
v.AddArg(x)
v.AddArg(x)
@@ -2871,1281 +2635,6 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64CALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com16 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com32 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com64 x)
- // cond:
- // result: (NOTQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com8 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const16 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32F [val])
- // cond:
- // result: (MOVSSconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVSSconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64 [val])
- // cond:
- // result: (MOVQconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64F [val])
- // cond:
- // result: (MOVSDconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVSDconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const8 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstBool [b])
- // cond:
- // result: (MOVLconst [b])
- for {
- b := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = b
- return true
- }
-}
-func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstNil)
- // cond:
- // result: (MOVQconst [0])
- for {
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Convert <t> x mem)
- // cond:
- // result: (MOVQconvert <t> x mem)
- for {
- t := v.Type
- x := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64MOVQconvert)
- v.Type = t
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz16 <t> x)
- // cond:
- // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVWEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz32 <t> x)
- // cond:
- // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVLEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 32
- return true
- }
-}
-func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz64 <t> x)
- // cond:
- // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVQEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 64
- return true
- }
-}
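The Ctz lowerings pair BSF with a conditional move because BSF leaves its destination undefined on a zero input; the CMP against 0 plus CMOV*EQconst substitutes the operand width in that case. A standalone Go sketch of the semantics being implemented (illustration only; bits.TrailingZeros64 already handles zero, the explicit branch just mirrors the lowering):

package main

import (
	"fmt"
	"math/bits"
)

func ctz64(x uint64) int {
	if x == 0 { // CMPQconst x [0] found zero: CMOVQEQconst supplies 64
		return 64
	}
	return bits.TrailingZeros64(x) // BSFQ on a non-zero input
}

func main() {
	fmt.Println(ctz64(0), ctz64(8), ctz64(1)) // 64 3 0
}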
-func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32 x)
- // cond:
- // result: (CVTTSS2SL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSS2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64 x)
- // cond:
- // result: (CVTTSS2SQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSS2SQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64F x)
- // cond:
- // result: (CVTSS2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSS2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to32F x)
- // cond:
- // result: (CVTSL2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSL2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to64F x)
- // cond:
- // result: (CVTSL2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSL2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32 x)
- // cond:
- // result: (CVTTSD2SL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSD2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32F x)
- // cond:
- // result: (CVTSD2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSD2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto64 x)
- // cond:
- // result: (CVTTSD2SQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSD2SQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64to32F x)
- // cond:
- // result: (CVTSQ2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSQ2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64to64F x)
- // cond:
- // result: (CVTSQ2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSQ2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (DeferCall [argwid] mem)
- // cond:
- // result: (CALLdefer [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpAMD64CALLdefer)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16 x y)
- // cond:
- // result: (Select0 (DIVW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16u x y)
- // cond:
- // result: (Select0 (DIVWU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32 x y)
- // cond:
- // result: (Select0 (DIVL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32F x y)
- // cond:
- // result: (DIVSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32u x y)
- // cond:
- // result: (Select0 (DIVLU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64 x y)
- // cond:
- // result: (Select0 (DIVQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64F x y)
- // cond:
- // result: (DIVSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64u x y)
- // cond:
- // result: (Select0 (DIVQU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8 x y)
- // cond:
- // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8u x y)
- // cond:
- // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
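Div8 and Div8u widen both operands to 16 bits before dividing; the extension preserves the value, and the quotient of two widened 8-bit operands always fits in 16 bits, including the int8 corner case -128 / -1, which truncation then wraps back to -128 as the Go spec requires. A standalone sketch (illustration only):

package main

import "fmt"

func div8(x, y int8) int8 {
	q := int16(x) / int16(y) // SignExt8to16 + DIVW; Select0 takes the quotient
	return int8(q)           // truncate back; -128/-1 wraps to -128 per the Go spec
}

func main() {
	fmt.Println(div8(7, 2), div8(-128, -1)) // 3 -128
}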
-func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq16 x y)
- // cond:
- // result: (SETEQ (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32 x y)
- // cond:
- // result: (SETEQ (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32F x y)
- // cond:
- // result: (SETEQF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64 x y)
- // cond:
- // result: (SETEQ (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64F x y)
- // cond:
- // result: (SETEQF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq8 x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqB x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqPtr x y)
- // cond:
- // result: (SETEQ (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16 x y)
- // cond:
- // result: (SETGE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16U x y)
- // cond:
- // result: (SETAE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32 x y)
- // cond:
- // result: (SETGE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32U x y)
- // cond:
- // result: (SETAE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64 x y)
- // cond:
- // result: (SETGE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64U x y)
- // cond:
- // result: (SETAE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8 x y)
- // cond:
- // result: (SETGE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8U x y)
- // cond:
- // result: (SETAE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetClosurePtr)
- // cond:
- // result: (LoweredGetClosurePtr)
- for {
- v.reset(OpAMD64LoweredGetClosurePtr)
- return true
- }
-}
-func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetG mem)
- // cond:
- // result: (LoweredGetG mem)
- for {
- mem := v.Args[0]
- v.reset(OpAMD64LoweredGetG)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GoCall [argwid] mem)
- // cond:
- // result: (CALLgo [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpAMD64CALLgo)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16 x y)
- // cond:
- // result: (SETG (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16U x y)
- // cond:
- // result: (SETA (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32 x y)
- // cond:
- // result: (SETG (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32F x y)
- // cond:
- // result: (SETGF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32U x y)
- // cond:
- // result: (SETA (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64 x y)
- // cond:
- // result: (SETG (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64F x y)
- // cond:
- // result: (SETGF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64U x y)
- // cond:
- // result: (SETA (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8 x y)
- // cond:
- // result: (SETG (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8U x y)
- // cond:
- // result: (SETA (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16 x y)
- // cond:
- // result: (HMULW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
- // cond:
- // result: (HMULWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
- // cond:
- // result: (HMULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
- // cond:
- // result: (HMULLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul64 x y)
- // cond:
- // result: (HMULQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul64u x y)
- // cond:
- // result: (HMULQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
- // cond:
- // result: (HMULB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
- // cond:
- // result: (HMULBU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULBU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (InterCall [argwid] entry mem)
- // cond:
- // result: (CALLinter [argwid] entry mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64CALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
- // cond:
- // result: (SETB (CMPQ idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil p)
- // cond:
- // result: (SETNE (TESTQ p p))
- for {
- p := v.Args[0]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
- v0.AddArg(p)
- v0.AddArg(p)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
- // cond:
- // result: (SETBE (CMPQ idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4834,893 +3323,6 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16 x y)
- // cond:
- // result: (SETLE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16U x y)
- // cond:
- // result: (SETBE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32 x y)
- // cond:
- // result: (SETLE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32U x y)
- // cond:
- // result: (SETBE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64 x y)
- // cond:
- // result: (SETLE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64U x y)
- // cond:
- // result: (SETBE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8 x y)
- // cond:
- // result: (SETLE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8U x y)
- // cond:
- // result: (SETBE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16 x y)
- // cond:
- // result: (SETL (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16U x y)
- // cond:
- // result: (SETB (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32 x y)
- // cond:
- // result: (SETL (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32F x y)
- // cond:
- // result: (SETGF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32U x y)
- // cond:
- // result: (SETB (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64 x y)
- // cond:
- // result: (SETL (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64F x y)
- // cond:
- // result: (SETGF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64U x y)
- // cond:
- // result: (SETB (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8 x y)
- // cond:
- // result: (SETL (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8U x y)
- // cond:
- // result: (SETB (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Load <t> ptr mem)
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (MOVQload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpAMD64MOVQload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitInt(t)
- // result: (MOVLload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVLload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is16BitInt(t)
- // result: (MOVWload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (t.IsBoolean() || is8BitInt(t))
- // result: (MOVBload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean() || is8BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVSSload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
- break
- }
- v.reset(OpAMD64MOVSSload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVSDload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
- break
- }
- v.reset(OpAMD64MOVSDload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
- // cond:
- // result: (ROLWconst <t> [c&15] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLWconst)
- v.Type = t
- v.AuxInt = c & 15
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 <t> x [c])
- // cond:
- // result: (ROLLconst <t> [c&31] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLLconst)
- v.Type = t
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot64 <t> x [c])
- // cond:
- // result: (ROLQconst <t> [c&63] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLQconst)
- v.Type = t
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
- // cond:
- // result: (ROLBconst <t> [c&7] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLBconst)
- v.Type = t
- v.AuxInt = c & 7
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x16 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x32 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x64 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x8 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
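The Lsh lowerings implement Go's shift semantics on hardware that masks the shift count: SHLL uses the count mod 32, so the SBB*carrymask term is all-ones when the unsigned compare y < bound sets the carry and all-zeros otherwise, and the AND forces the zero result Go requires for oversized counts. The 8/16/32-bit variants share the bound 32 because counts in [width, 31] already clear the low bits naturally. A standalone model of Lsh16x16 (illustration only):

package main

import "fmt"

func lsh16x16(x, y uint16) uint16 {
	shifted := uint32(x) << (y & 31) // SHLL: the hardware masks the count mod 32
	var mask uint32                  // SBBLcarrymask (CMPWconst y [32])
	if y < 32 {
		mask = ^uint32(0) // carry set: shift count in range, keep the result
	}
	return uint16(shifted & mask) // ANDL, then the 16-bit result type
}

func main() {
	fmt.Println(lsh16x16(1, 4), lsh16x16(1, 20), lsh16x16(1, 40)) // 16 0 0
}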
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -11903,604 +9505,6 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16 x y)
- // cond:
- // result: (Select1 (DIVW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16u x y)
- // cond:
- // result: (Select1 (DIVWU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32 x y)
- // cond:
- // result: (Select1 (DIVL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32u x y)
- // cond:
- // result: (Select1 (DIVLU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod64 x y)
- // cond:
- // result: (Select1 (DIVQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod64u x y)
- // cond:
- // result: (Select1 (DIVQU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8 x y)
- // cond:
- // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8u x y)
- // cond:
- // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Move [s] _ _ mem)
- // cond: SizeAndAlign(s).Size() == 0
- // result: mem
- for {
- s := v.AuxInt
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 1
- // result: (MOVBstore dst (MOVBload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 1) {
- break
- }
- v.reset(OpAMD64MOVBstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2
- // result: (MOVWstore dst (MOVWload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2) {
- break
- }
- v.reset(OpAMD64MOVWstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4
- // result: (MOVLstore dst (MOVLload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4) {
- break
- }
- v.reset(OpAMD64MOVLstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 8
- // result: (MOVQstore dst (MOVQload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 8) {
- break
- }
- v.reset(OpAMD64MOVQstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 16
- // result: (MOVOstore dst (MOVOload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 16) {
- break
- }
- v.reset(OpAMD64MOVOstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 3
- // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 3) {
- break
- }
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 5
- // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 5) {
- break
- }
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 6
- // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 6) {
- break
- }
- v.reset(OpAMD64MOVWstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 7
- // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 7) {
- break
- }
- v.reset(OpAMD64MOVLstore)
- v.AuxInt = 3
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
- // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
- break
- }
- v.reset(OpAMD64MOVQstore)
- v.AuxInt = SizeAndAlign(s).Size() - 8
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v0.AuxInt = SizeAndAlign(s).Size() - 8
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
- // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
- break
- }
- v.reset(OpMove)
- v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
- v0.AddArg(dst)
- v0.AuxInt = SizeAndAlign(s).Size() % 16
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
- v1.AddArg(src)
- v1.AuxInt = SizeAndAlign(s).Size() % 16
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
- v2.AddArg(dst)
- v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
- // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
- break
- }
- v.reset(OpMove)
- v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
- v0.AddArg(dst)
- v0.AuxInt = SizeAndAlign(s).Size() % 16
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
- v1.AddArg(src)
- v1.AuxInt = SizeAndAlign(s).Size() % 16
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
- v2.AddArg(dst)
- v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
- // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
- break
- }
- v.reset(OpAMD64DUFFCOPY)
- v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
- // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
- break
- }
- v.reset(OpAMD64REPMOVSQ)
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
- v0.AuxInt = SizeAndAlign(s).Size() / 8
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- return false
-}
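// The Move rules above avoid byte-by-byte loops for awkward sizes by
// issuing two fixed-width copies that may overlap: size 7, for example,
// copies bytes [0,4) and bytes [3,7) with two 4-byte load/store pairs,
// and sizes 8 < s < 16 do the same with two 8-byte copies. A minimal Go
// model of the size-7 case (a sketch of the semantics, not the
// generated code), assuming dst and src do not overlap:
func move7(dst, src []byte) {
	copy(dst[0:4], src[0:4]) // MOVLstore dst (MOVLload src mem) mem
	copy(dst[3:7], src[3:7]) // MOVLstore [3] dst (MOVLload [3] src mem) ...
}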
-func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul16 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32F x y)
- // cond:
- // result: (MULSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64 x y)
- // cond:
- // result: (MULQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64F x y)
- // cond:
- // result: (MULSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul8 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
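// Mul8 and Mul16 have no dedicated narrow multiply: they reuse MULL
// because the low 8/16 bits of a 32-bit product depend only on the low
// 8/16 bits of the inputs, so truncating the wide result is exact.
// Equivalent Go (a sketch, not the generated code):
func mul16(x, y int16) int16 {
	return int16(int32(x) * int32(y)) // MULL, then implicit truncation
}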
func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12573,255 +9577,6 @@ func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg16 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32F x)
- // cond:
- // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(OpAMD64PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64 x)
- // cond:
- // result: (NEGQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64F x)
- // cond:
- // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(OpAMD64PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg8 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
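// The Neg32F and Neg64F rules above negate by flipping only the
// IEEE-754 sign bit: PXOR against f2i(math.Copysign(0, -1)), a constant
// with just the top bit set (0x8000000000000000 for float64). A Go
// model of the same bit trick, assuming the standard math package:
func neg64F(x float64) float64 {
	signBit := math.Float64bits(math.Copysign(0, -1)) // 1 << 63
	return math.Float64frombits(math.Float64bits(x) ^ signBit)
}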
-func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq16 x y)
- // cond:
- // result: (SETNE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32 x y)
- // cond:
- // result: (SETNE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32F x y)
- // cond:
- // result: (SETNEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64 x y)
- // cond:
- // result: (SETNE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64F x y)
- // cond:
- // result: (SETNEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq8 x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqB x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqPtr x y)
- // cond:
- // result: (SETNE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NilCheck ptr mem)
- // cond:
- // result: (LoweredNilCheck ptr mem)
- for {
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64LoweredNilCheck)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Not x)
- // cond:
- // result: (XORLconst [1] x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = 1
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -13771,112 +10526,6 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OffPtr [off] ptr)
- // cond: is32Bit(off)
- // result: (ADDQconst [off] ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- if !(is32Bit(off)) {
- break
- }
- v.reset(OpAMD64ADDQconst)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
- // match: (OffPtr [off] ptr)
- // cond:
- // result: (ADDQ (MOVQconst [off]) ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(OpAMD64ADDQ)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
- v0.AuxInt = off
- v.AddArg(v0)
- v.AddArg(ptr)
- return true
- }
-}
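// OffPtr above folds its offset into a single ADDQconst only when the
// offset fits in a signed 32-bit immediate, the limit for x86-64
// instruction encodings; anything larger is materialized with MOVQconst
// and added with a register-register ADDQ. The guard is the same
// is32Bit helper these rules use elsewhere:
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}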
-func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or16 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or32 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or64 x y)
- // cond:
- // result: (ORQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or8 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OrB x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -14013,822 +10662,6 @@ func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x16 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x32 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x64 <t> x y)
- // cond:
- // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x8 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x16 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x32 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x64 <t> x y)
- // cond:
- // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x8 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux16 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux32 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux64 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux8 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x16 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x32 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x64 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x8 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x16 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x32 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x64 <t> x y)
- // cond:
- // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x8 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
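// The Rsh rules above encode Go's shift semantics without branches. Go
// requires an unsigned right shift by >= the operand width to yield 0,
// and a signed one to saturate to a full sign fill. CMPxconst y [w]
// plus SBBxcarrymask yields an all-ones mask when y < w and zero
// otherwise: the unsigned forms AND it into the SHR result, while the
// signed forms invert it and OR it into the shift count, forcing a
// count large enough that SAR produces a pure sign fill. A branching
// Go model of the unsigned case (Rsh16Ux64):
func rsh16Ux64(x uint16, y uint64) uint16 {
	if y >= 16 {
		return 0 // SBBLcarrymask(CMPQconst y [16]) is zero here
	}
	return x >> y // SHRW
}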
func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16287,8 +12120,8 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpAMD64SUBLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (SUBL (MOVLconst [c]) x)
@@ -16303,8 +12136,8 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpAMD64NEGL)
v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16368,8 +12201,8 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
break
}
v.reset(OpAMD64SUBQconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (SUBQ (MOVQconst [c]) x)
@@ -16387,8 +12220,8 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
}
v.reset(OpAMD64NEGQ)
v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16440,12 +12273,12 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
// cond:
// result: (MOVQconst [d-c])
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
d := v_0.AuxInt
- c := v.AuxInt
v.reset(OpAMD64MOVQconst)
v.AuxInt = d - c
return true
@@ -16454,13 +12287,13 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
// cond: is32Bit(-c-d)
// result: (ADDQconst [-c-d] x)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SUBQconst {
break
}
- x := v_0.Args[0]
d := v_0.AuxInt
- c := v.AuxInt
+ x := v_0.Args[0]
if !(is32Bit(-c - d)) {
break
}
@@ -16471,6 +12304,4370 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORL x (MOVLconst [c]))
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL (MOVLconst [c]) x)
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL x x)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQ x x)
+ // cond:
+ // result: (MOVQconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORQconst [c] (XORQconst [d] x))
+ // cond:
+ // result: (XORQconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
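// The XOR rules added above are the usual algebraic simplifications: a
// constant operand (when it fits in 32 bits, for the Q forms) becomes
// an XORxconst, nested constants fold as c^d, xor with 0 is dropped,
// and x^x collapses to the constant 0. In plain Go terms:
func xorIdentities(x uint64) (uint64, uint64) {
	return x ^ 0, x ^ x // rewritten to x and to MOVQconst [0]
}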
+func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (LEAQ {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpAMD64LEAQ)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And64 x y)
+ // cond:
+ // result: (ANDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Avg64u x y)
+ // cond:
+ // result: (AVGQU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64AVGQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap32 x)
+ // cond:
+ // result: (BSWAPL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64BSWAPL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap64 x)
+ // cond:
+ // result: (BSWAPQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64BSWAPQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com64 x)
+ // cond:
+ // result: (NOTQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVSSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVSSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64 [val])
+ // cond:
+ // result: (MOVQconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVSDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVSDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVLconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVQconst [0])
+ for {
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert <t> x mem)
+ // cond:
+ // result: (MOVQconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64MOVQconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz16 <t> x)
+ // cond:
+ // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVWEQconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz32 <t> x)
+ // cond:
+ // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVLEQconst)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz64 <t> x)
+ // cond:
+ // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVQEQconst)
+ v.AuxInt = 64
+ v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
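// The Ctz lowerings above cannot use BSF alone: BSF leaves its result
// undefined when the input is zero, so each rule wraps it in a
// CMOVxEQconst that substitutes the operand width (16, 32, or 64) when
// the CMPxconst against 0 sets the zero flag. A Go model of Ctz64,
// assuming the standard math/bits package:
func ctz64(x uint64) int {
	if x == 0 {
		return 64 // CMOVQEQconst selects the constant
	}
	return bits.TrailingZeros64(x) // BSFQ
}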
+func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (CVTTSS2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSS2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64 x)
+ // cond:
+ // result: (CVTTSS2SQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSS2SQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (CVTSS2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSS2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (CVTSL2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSL2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (CVTSL2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSL2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (CVTTSD2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSD2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (CVTSD2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSD2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto64 x)
+ // cond:
+ // result: (CVTTSD2SQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSD2SQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to32F x)
+ // cond:
+ // result: (CVTSQ2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSQ2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to64F x)
+ // cond:
+ // result: (CVTSQ2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSQ2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpAMD64CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (Select0 (DIVW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (Select0 (DIVWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (Select0 (DIVL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64DIVSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (Select0 (DIVLU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64 x y)
+ // cond:
+ // result: (Select0 (DIVQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64DIVSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64u x y)
+ // cond:
+ // result: (Select0 (DIVQU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
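// Div8 and Div8u above do not divide at 8 bits: both operands are
// widened to 16 bits (SignExt8to16 for the signed form, ZeroExt8to16
// for the unsigned one) and fed to DIVW/DIVWU, which produce a
// quotient/remainder tuple; Select0 keeps the quotient. Equivalent Go:
func div8(x, y int8) int8 {
	return int8(int16(x) / int16(y)) // SignExt8to16 both, DIVW, Select0
}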
+func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64 x y)
+ // cond:
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (SETGE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (SETAE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (SETGE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (SETAE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64 x y)
+ // cond:
+ // result: (SETGE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64U x y)
+ // cond:
+ // result: (SETAE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (SETGE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (SETAE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpAMD64LoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetG mem)
+ // cond:
+ // result: (LoweredGetG mem)
+ for {
+ mem := v.Args[0]
+ v.reset(OpAMD64LoweredGetG)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpAMD64CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (SETG (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (SETA (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (SETG (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (SETA (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64 x y)
+ // cond:
+ // result: (SETG (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64U x y)
+ // cond:
+ // result: (SETA (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (SETG (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (SETA (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (HMULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (HMULWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64 x y)
+ // cond:
+ // result: (HMULQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64u x y)
+ // cond:
+ // result: (HMULQU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (HMULB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (HMULBU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULBU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (SETB (CMPQ idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil p)
+ // cond:
+ // result: (SETNE (TESTQ p p))
+ for {
+ p := v.Args[0]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+}
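+// TESTQ p p ANDs the pointer with itself, setting ZF exactly when p == 0,
+// so SETNE yields 1 for any non-nil pointer without needing an immediate.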
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (SETBE (CMPQ idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
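+// Leq32F swaps its operands: x <= y is computed as y >= x. After UCOMISS,
+// only the "above"-style conditions (SETGF/SETGEF) are false for unordered
+// (NaN) inputs, so <= and < are rewritten in terms of >= and > with the
+// arguments reversed. The Less*F rules below use the same trick.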
+func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64 x y)
+ // cond:
+ // result: (SETLE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64U x y)
+ // cond:
+ // result: (SETBE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (SETL (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (SETB (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (SETL (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (SETB (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64 x y)
+ // cond:
+ // result: (SETL (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64U x y)
+ // cond:
+ // result: (SETB (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (SETL (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (SETB (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVQload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t)
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
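+// The Load rules form a cascade: each case narrows on the value's type
+// (width, pointer-ness, float-ness) and picks the matching MOV width.
+// Falling off the end returns false, meaning no rewrite applied; loads of
+// types not handled here are expected to have been decomposed by earlier
+// passes.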
+func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (ROLWconst <t> [c&15] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLWconst)
+ v.Type = t
+ v.AuxInt = c & 15
+ v.AddArg(x)
+ return true
+ }
+}
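+// The rotate count is masked to the operand width (c&15 here; c&31, c&63,
+// and c&7 below) so that ROLxconst always receives an in-range immediate;
+// rotating by a multiple of the width is a no-op, so masking preserves
+// semantics.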
+func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 <t> x [c])
+ // cond:
+ // result: (ROLLconst <t> [c&31] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLLconst)
+ v.Type = t
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot64 <t> x [c])
+ // cond:
+ // result: (ROLQconst <t> [c&63] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLQconst)
+ v.Type = t
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (ROLBconst <t> [c&7] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLBconst)
+ v.Type = t
+ v.AuxInt = c & 7
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
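+// The ANDL/SBBLcarrymask pattern above is the branchless clamp used by all
+// the left and unsigned-right shift lowerings: Go defines oversized shifts
+// to produce zero, while the hardware masks the shift count. CMPWconst y
+// [32] sets the carry flag iff y < 32 (unsigned), and SBBLcarrymask turns
+// that carry into an all-ones or all-zeros mask. Roughly, in plain Go:
+//
+//	// sketch, not generated code
+//	mask := int32(0)
+//	if uint16(y) < 32 {
+//		mask = -1
+//	}
+//	result := (int32(x) << (y & 31)) & mask
+//
+// For 16-bit left shifts, counts 16..31 already clear the low 16 bits, so
+// comparing against 32 rather than 16 is sufficient.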
+func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x16 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x32 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x64 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x8 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (Select1 (DIVW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
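+// DIVW produces a (quotient, remainder) tuple — hence the MakeTuple type —
+// and Mod selects the remainder with Select1 (Div uses Select0).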
+func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (Select1 (DIVWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (Select1 (DIVL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (Select1 (DIVLU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64 x y)
+ // cond:
+ // result: (Select1 (DIVQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64u x y)
+ // cond:
+ // result: (Select1 (DIVQU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
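+// Mod8 and Mod8u widen their operands to 16 bits and reuse DIVW/DIVWU
+// rather than a dedicated byte divide; the remainder of the widened
+// division equals the 8-bit remainder, so only a truncation is implied.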
+func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVQstore dst (MOVQload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 16
+ // result: (MOVOstore dst (MOVOload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 16) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
+ // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = SizeAndAlign(s).Size() - 8
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0.AuxInt = SizeAndAlign(s).Size() - 8
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
+ v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 16
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 16
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
+ v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 16
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 16
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
+ // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpAMD64DUFFCOPY)
+ v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
+ // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
+ break
+ }
+ v.reset(OpAMD64REPMOVSQ)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0.AuxInt = SizeAndAlign(s).Size() / 8
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
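+// The Move rules are tiered by size: exact machine widths (1/2/4/8/16) use
+// a single load/store pair; in-between sizes use two possibly overlapping
+// pairs (e.g. a 13-byte move becomes 8-byte copies at offsets 5 and 0,
+// overlapping in bytes 5-7); larger moves first peel the non-multiple-of-16
+// tail, then dispatch to Duff's device or, past 16*64 bytes (or when Duff's
+// device is disabled), to REP MOVSQ.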
+func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64 x y)
+ // cond:
+ // result: (MULQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
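+// Floating-point negation has no dedicated instruction here; XORing with
+// the sign-bit mask (f2i(math.Copysign(0, -1)) is the bit pattern of -0.0)
+// flips the sign while leaving every other bit, including NaN payloads,
+// untouched.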
+func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64 x)
+ // cond:
+ // result: (NEGQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64 x y)
+ // cond:
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORLconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
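+// Booleans are materialized as 0 or 1, so logical negation is just
+// XORLconst [1].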
+func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDQconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDQ (MOVQconst [off]) ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0.AuxInt = off
+ v.AddArg(v0)
+ v.AddArg(ptr)
+ return true
+ }
+}
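+// Pointer offsets that fit in a signed 32-bit immediate fold directly into
+// ADDQconst; is32Bit guards that, and the fallback materializes the offset
+// in a register first.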
+func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or64 x y)
+ // cond:
+ // result: (ORQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
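+// Signed right shifts must saturate rather than zero: shifting by at least
+// the width should fill with the sign bit. NOTing the carrymask gives 0 for
+// in-range counts and all-ones otherwise; ORing that into y leaves small
+// counts alone but forces oversized ones to an all-ones count, which SARW
+// (whose hardware count is masked) treats as a maximal arithmetic shift.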
+func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux16 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux32 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux64 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux8 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x16 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x32 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x64 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x8 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
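
Editor's note: every Rsh rewrite added above follows the same branch-free lowering. A CMP*const against the operand width sets the carry flag when the shift count is in range, SBB*carrymask turns that flag into an all-ones or all-zero word, and the mask either zeroes an oversized unsigned shift or saturates the count for a signed one. A minimal plain-Go sketch of the intended semantics for the 16-bit value / 8-bit count pair (package and function names below are illustrative, not compiler identifiers):

package shiftdemo

// rsh16Ux8 mirrors (Rsh16Ux8 x y): SBBLcarrymask(CMPBconst y [16]) is all
// ones when y < 16 and zero otherwise, so ANDing it with the SHRW result
// forces oversized shifts to zero.
func rsh16Ux8(x uint16, y uint8) uint16 {
	if y >= 16 {
		return 0
	}
	return x >> y
}

// rsh16x8 mirrors (Rsh16x8 x y): ORing the count with NOTL(SBBLcarrymask(...))
// saturates it to all ones when y >= 16, and an arithmetic shift by such a
// count leaves only copies of the sign bit, the same as shifting by 15 here.
func rsh16x8(x int16, y uint8) int16 {
	if y >= 16 {
		y = 15
	}
	return x >> y
}
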
func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16875,203 +17072,6 @@ func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
return true
}
}
-func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORL x (MOVLconst [c]))
- // cond:
- // result: (XORLconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL (MOVLconst [c]) x)
- // cond:
- // result: (XORLconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL x x)
- // cond:
- // result: (MOVLconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORLconst [c] (XORLconst [d] x))
- // cond:
- // result: (XORLconst [c ^ d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64XORLconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c ^ d
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] x)
- // cond: int32(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int32(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] (MOVLconst [d]))
- // cond:
- // result: (MOVLconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORQ x (MOVQconst [c]))
- // cond: is32Bit(c)
- // result: (XORQconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_1.AuxInt
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORQ (MOVQconst [c]) x)
- // cond: is32Bit(c)
- // result: (XORQconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORQ x x)
- // cond:
- // result: (MOVQconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORQconst [c] (XORQconst [d] x))
- // cond:
- // result: (XORQconst [c ^ d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64XORQconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c ^ d
- v.AddArg(x)
- return true
- }
- // match: (XORQconst [0] x)
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORQconst [c] (MOVQconst [d]))
- // cond:
- // result: (MOVQconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
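
Editor's note: the four OpAMD64XOR* helpers deleted here appear to be relocated earlier in the file, not dropped; the hunk header's counts and the rewriteARM.go switch below show the case labels now sorted by their full Op names, which places arch-prefixed opcodes (OpAMD64*, OpARM*) ahead of the generic OpAdd16/OpXor16 group. A runnable sketch of that byte-wise ordering:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// ASCII uppercase sorts before lowercase, so "OpARMXOR" < "OpAdd16".
	ops := []string{"OpAdd16", "OpARMXOR", "OpXor16", "OpAMD64XORL"}
	sort.Strings(ops)
	fmt.Println(ops) // [OpAMD64XORL OpARMXOR OpAdd16 OpXor16]
}
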
func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -17306,8 +17306,8 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.reset(OpZero)
v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
- v0.AddArg(destptr)
v0.AuxInt = SizeAndAlign(s).Size() % 8
+ v0.AddArg(destptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
v1.AuxInt = 0
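
Editor's note: the Zero hunk above and most of the rewriteARM.go hunks that follow are pure emission-order changes: AuxInt assignments (and aux-derived reads such as d := v.AuxInt) now come before the AddArg calls and Args reads. Since AuxInt and Args are independent fields of a Value, either order builds the same node; a minimal sketch with an illustrative stand-in type, not the compiler's:

package valuedemo

// Value stands in for the SSA node being built; only the two fields the
// reordering touches are modeled.
type Value struct {
	AuxInt int64
	Args   []*Value
}

func (v *Value) AddArg(w *Value) { v.Args = append(v.Args, w) }

// build populates the constant first, then the argument, matching the new
// generated order; swapping the two lines yields an identical node.
func build(x *Value, c int64) *Value {
	v := &Value{}
	v.AuxInt = c
	v.AddArg(x)
	return v
}
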
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index eb000d7460..a4659e40bd 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -70,32 +70,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMANDshiftRL(v, config)
case OpARMANDshiftRLreg:
return rewriteValueARM_OpARMANDshiftRLreg(v, config)
- case OpAdd16:
- return rewriteValueARM_OpAdd16(v, config)
- case OpAdd32:
- return rewriteValueARM_OpAdd32(v, config)
- case OpAdd32F:
- return rewriteValueARM_OpAdd32F(v, config)
- case OpAdd32carry:
- return rewriteValueARM_OpAdd32carry(v, config)
- case OpAdd32withcarry:
- return rewriteValueARM_OpAdd32withcarry(v, config)
- case OpAdd64F:
- return rewriteValueARM_OpAdd64F(v, config)
- case OpAdd8:
- return rewriteValueARM_OpAdd8(v, config)
- case OpAddPtr:
- return rewriteValueARM_OpAddPtr(v, config)
- case OpAddr:
- return rewriteValueARM_OpAddr(v, config)
- case OpAnd16:
- return rewriteValueARM_OpAnd16(v, config)
- case OpAnd32:
- return rewriteValueARM_OpAnd32(v, config)
- case OpAnd8:
- return rewriteValueARM_OpAnd8(v, config)
- case OpAndB:
- return rewriteValueARM_OpAndB(v, config)
case OpARMBIC:
return rewriteValueARM_OpARMBIC(v, config)
case OpARMBICconst:
@@ -136,6 +110,258 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpARMCMPshiftRL(v, config)
case OpARMCMPshiftRLreg:
return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
+ case OpARMDIV:
+ return rewriteValueARM_OpARMDIV(v, config)
+ case OpARMDIVU:
+ return rewriteValueARM_OpARMDIVU(v, config)
+ case OpARMEqual:
+ return rewriteValueARM_OpARMEqual(v, config)
+ case OpARMGreaterEqual:
+ return rewriteValueARM_OpARMGreaterEqual(v, config)
+ case OpARMGreaterEqualU:
+ return rewriteValueARM_OpARMGreaterEqualU(v, config)
+ case OpARMGreaterThan:
+ return rewriteValueARM_OpARMGreaterThan(v, config)
+ case OpARMGreaterThanU:
+ return rewriteValueARM_OpARMGreaterThanU(v, config)
+ case OpARMLessEqual:
+ return rewriteValueARM_OpARMLessEqual(v, config)
+ case OpARMLessEqualU:
+ return rewriteValueARM_OpARMLessEqualU(v, config)
+ case OpARMLessThan:
+ return rewriteValueARM_OpARMLessThan(v, config)
+ case OpARMLessThanU:
+ return rewriteValueARM_OpARMLessThanU(v, config)
+ case OpARMMOVBUload:
+ return rewriteValueARM_OpARMMOVBUload(v, config)
+ case OpARMMOVBUreg:
+ return rewriteValueARM_OpARMMOVBUreg(v, config)
+ case OpARMMOVBload:
+ return rewriteValueARM_OpARMMOVBload(v, config)
+ case OpARMMOVBreg:
+ return rewriteValueARM_OpARMMOVBreg(v, config)
+ case OpARMMOVBstore:
+ return rewriteValueARM_OpARMMOVBstore(v, config)
+ case OpARMMOVDload:
+ return rewriteValueARM_OpARMMOVDload(v, config)
+ case OpARMMOVDstore:
+ return rewriteValueARM_OpARMMOVDstore(v, config)
+ case OpARMMOVFload:
+ return rewriteValueARM_OpARMMOVFload(v, config)
+ case OpARMMOVFstore:
+ return rewriteValueARM_OpARMMOVFstore(v, config)
+ case OpARMMOVHUload:
+ return rewriteValueARM_OpARMMOVHUload(v, config)
+ case OpARMMOVHUreg:
+ return rewriteValueARM_OpARMMOVHUreg(v, config)
+ case OpARMMOVHload:
+ return rewriteValueARM_OpARMMOVHload(v, config)
+ case OpARMMOVHreg:
+ return rewriteValueARM_OpARMMOVHreg(v, config)
+ case OpARMMOVHstore:
+ return rewriteValueARM_OpARMMOVHstore(v, config)
+ case OpARMMOVWload:
+ return rewriteValueARM_OpARMMOVWload(v, config)
+ case OpARMMOVWloadidx:
+ return rewriteValueARM_OpARMMOVWloadidx(v, config)
+ case OpARMMOVWloadshiftLL:
+ return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
+ case OpARMMOVWloadshiftRA:
+ return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
+ case OpARMMOVWloadshiftRL:
+ return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
+ case OpARMMOVWreg:
+ return rewriteValueARM_OpARMMOVWreg(v, config)
+ case OpARMMOVWstore:
+ return rewriteValueARM_OpARMMOVWstore(v, config)
+ case OpARMMOVWstoreidx:
+ return rewriteValueARM_OpARMMOVWstoreidx(v, config)
+ case OpARMMOVWstoreshiftLL:
+ return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
+ case OpARMMOVWstoreshiftRA:
+ return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
+ case OpARMMOVWstoreshiftRL:
+ return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
+ case OpARMMUL:
+ return rewriteValueARM_OpARMMUL(v, config)
+ case OpARMMULA:
+ return rewriteValueARM_OpARMMULA(v, config)
+ case OpARMMVN:
+ return rewriteValueARM_OpARMMVN(v, config)
+ case OpARMMVNshiftLL:
+ return rewriteValueARM_OpARMMVNshiftLL(v, config)
+ case OpARMMVNshiftLLreg:
+ return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
+ case OpARMMVNshiftRA:
+ return rewriteValueARM_OpARMMVNshiftRA(v, config)
+ case OpARMMVNshiftRAreg:
+ return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
+ case OpARMMVNshiftRL:
+ return rewriteValueARM_OpARMMVNshiftRL(v, config)
+ case OpARMMVNshiftRLreg:
+ return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
+ case OpARMNotEqual:
+ return rewriteValueARM_OpARMNotEqual(v, config)
+ case OpARMOR:
+ return rewriteValueARM_OpARMOR(v, config)
+ case OpARMORconst:
+ return rewriteValueARM_OpARMORconst(v, config)
+ case OpARMORshiftLL:
+ return rewriteValueARM_OpARMORshiftLL(v, config)
+ case OpARMORshiftLLreg:
+ return rewriteValueARM_OpARMORshiftLLreg(v, config)
+ case OpARMORshiftRA:
+ return rewriteValueARM_OpARMORshiftRA(v, config)
+ case OpARMORshiftRAreg:
+ return rewriteValueARM_OpARMORshiftRAreg(v, config)
+ case OpARMORshiftRL:
+ return rewriteValueARM_OpARMORshiftRL(v, config)
+ case OpARMORshiftRLreg:
+ return rewriteValueARM_OpARMORshiftRLreg(v, config)
+ case OpARMRSB:
+ return rewriteValueARM_OpARMRSB(v, config)
+ case OpARMRSBSshiftLL:
+ return rewriteValueARM_OpARMRSBSshiftLL(v, config)
+ case OpARMRSBSshiftLLreg:
+ return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
+ case OpARMRSBSshiftRA:
+ return rewriteValueARM_OpARMRSBSshiftRA(v, config)
+ case OpARMRSBSshiftRAreg:
+ return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
+ case OpARMRSBSshiftRL:
+ return rewriteValueARM_OpARMRSBSshiftRL(v, config)
+ case OpARMRSBSshiftRLreg:
+ return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
+ case OpARMRSBconst:
+ return rewriteValueARM_OpARMRSBconst(v, config)
+ case OpARMRSBshiftLL:
+ return rewriteValueARM_OpARMRSBshiftLL(v, config)
+ case OpARMRSBshiftLLreg:
+ return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
+ case OpARMRSBshiftRA:
+ return rewriteValueARM_OpARMRSBshiftRA(v, config)
+ case OpARMRSBshiftRAreg:
+ return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
+ case OpARMRSBshiftRL:
+ return rewriteValueARM_OpARMRSBshiftRL(v, config)
+ case OpARMRSBshiftRLreg:
+ return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
+ case OpARMRSCconst:
+ return rewriteValueARM_OpARMRSCconst(v, config)
+ case OpARMRSCshiftLL:
+ return rewriteValueARM_OpARMRSCshiftLL(v, config)
+ case OpARMRSCshiftLLreg:
+ return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
+ case OpARMRSCshiftRA:
+ return rewriteValueARM_OpARMRSCshiftRA(v, config)
+ case OpARMRSCshiftRAreg:
+ return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
+ case OpARMRSCshiftRL:
+ return rewriteValueARM_OpARMRSCshiftRL(v, config)
+ case OpARMRSCshiftRLreg:
+ return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
+ case OpARMSBC:
+ return rewriteValueARM_OpARMSBC(v, config)
+ case OpARMSBCconst:
+ return rewriteValueARM_OpARMSBCconst(v, config)
+ case OpARMSBCshiftLL:
+ return rewriteValueARM_OpARMSBCshiftLL(v, config)
+ case OpARMSBCshiftLLreg:
+ return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
+ case OpARMSBCshiftRA:
+ return rewriteValueARM_OpARMSBCshiftRA(v, config)
+ case OpARMSBCshiftRAreg:
+ return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
+ case OpARMSBCshiftRL:
+ return rewriteValueARM_OpARMSBCshiftRL(v, config)
+ case OpARMSBCshiftRLreg:
+ return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
+ case OpARMSLL:
+ return rewriteValueARM_OpARMSLL(v, config)
+ case OpARMSLLconst:
+ return rewriteValueARM_OpARMSLLconst(v, config)
+ case OpARMSRA:
+ return rewriteValueARM_OpARMSRA(v, config)
+ case OpARMSRAcond:
+ return rewriteValueARM_OpARMSRAcond(v, config)
+ case OpARMSRAconst:
+ return rewriteValueARM_OpARMSRAconst(v, config)
+ case OpARMSRL:
+ return rewriteValueARM_OpARMSRL(v, config)
+ case OpARMSRLconst:
+ return rewriteValueARM_OpARMSRLconst(v, config)
+ case OpARMSUB:
+ return rewriteValueARM_OpARMSUB(v, config)
+ case OpARMSUBS:
+ return rewriteValueARM_OpARMSUBS(v, config)
+ case OpARMSUBSshiftLL:
+ return rewriteValueARM_OpARMSUBSshiftLL(v, config)
+ case OpARMSUBSshiftLLreg:
+ return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
+ case OpARMSUBSshiftRA:
+ return rewriteValueARM_OpARMSUBSshiftRA(v, config)
+ case OpARMSUBSshiftRAreg:
+ return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
+ case OpARMSUBSshiftRL:
+ return rewriteValueARM_OpARMSUBSshiftRL(v, config)
+ case OpARMSUBSshiftRLreg:
+ return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
+ case OpARMSUBconst:
+ return rewriteValueARM_OpARMSUBconst(v, config)
+ case OpARMSUBshiftLL:
+ return rewriteValueARM_OpARMSUBshiftLL(v, config)
+ case OpARMSUBshiftLLreg:
+ return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
+ case OpARMSUBshiftRA:
+ return rewriteValueARM_OpARMSUBshiftRA(v, config)
+ case OpARMSUBshiftRAreg:
+ return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
+ case OpARMSUBshiftRL:
+ return rewriteValueARM_OpARMSUBshiftRL(v, config)
+ case OpARMSUBshiftRLreg:
+ return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
+ case OpARMXOR:
+ return rewriteValueARM_OpARMXOR(v, config)
+ case OpARMXORconst:
+ return rewriteValueARM_OpARMXORconst(v, config)
+ case OpARMXORshiftLL:
+ return rewriteValueARM_OpARMXORshiftLL(v, config)
+ case OpARMXORshiftLLreg:
+ return rewriteValueARM_OpARMXORshiftLLreg(v, config)
+ case OpARMXORshiftRA:
+ return rewriteValueARM_OpARMXORshiftRA(v, config)
+ case OpARMXORshiftRAreg:
+ return rewriteValueARM_OpARMXORshiftRAreg(v, config)
+ case OpARMXORshiftRL:
+ return rewriteValueARM_OpARMXORshiftRL(v, config)
+ case OpARMXORshiftRLreg:
+ return rewriteValueARM_OpARMXORshiftRLreg(v, config)
+ case OpAdd16:
+ return rewriteValueARM_OpAdd16(v, config)
+ case OpAdd32:
+ return rewriteValueARM_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValueARM_OpAdd32F(v, config)
+ case OpAdd32carry:
+ return rewriteValueARM_OpAdd32carry(v, config)
+ case OpAdd32withcarry:
+ return rewriteValueARM_OpAdd32withcarry(v, config)
+ case OpAdd64F:
+ return rewriteValueARM_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValueARM_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValueARM_OpAddPtr(v, config)
+ case OpAddr:
+ return rewriteValueARM_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValueARM_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValueARM_OpAnd32(v, config)
+ case OpAnd8:
+ return rewriteValueARM_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValueARM_OpAndB(v, config)
case OpClosureCall:
return rewriteValueARM_OpClosureCall(v, config)
case OpCom16:
@@ -180,10 +406,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpCvt64Fto32F(v, config)
case OpCvt64Fto32U:
return rewriteValueARM_OpCvt64Fto32U(v, config)
- case OpARMDIV:
- return rewriteValueARM_OpARMDIV(v, config)
- case OpARMDIVU:
- return rewriteValueARM_OpARMDIVU(v, config)
case OpDeferCall:
return rewriteValueARM_OpDeferCall(v, config)
case OpDiv16:
@@ -216,8 +438,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpEqB(v, config)
case OpEqPtr:
return rewriteValueARM_OpEqPtr(v, config)
- case OpARMEqual:
- return rewriteValueARM_OpARMEqual(v, config)
case OpGeq16:
return rewriteValueARM_OpGeq16(v, config)
case OpGeq16U:
@@ -254,14 +474,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpGreater8(v, config)
case OpGreater8U:
return rewriteValueARM_OpGreater8U(v, config)
- case OpARMGreaterEqual:
- return rewriteValueARM_OpARMGreaterEqual(v, config)
- case OpARMGreaterEqualU:
- return rewriteValueARM_OpARMGreaterEqualU(v, config)
- case OpARMGreaterThan:
- return rewriteValueARM_OpARMGreaterThan(v, config)
- case OpARMGreaterThanU:
- return rewriteValueARM_OpARMGreaterThanU(v, config)
case OpHmul16:
return rewriteValueARM_OpHmul16(v, config)
case OpHmul16u:
@@ -314,14 +526,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpLess8(v, config)
case OpLess8U:
return rewriteValueARM_OpLess8U(v, config)
- case OpARMLessEqual:
- return rewriteValueARM_OpARMLessEqual(v, config)
- case OpARMLessEqualU:
- return rewriteValueARM_OpARMLessEqualU(v, config)
- case OpARMLessThan:
- return rewriteValueARM_OpARMLessThan(v, config)
- case OpARMLessThanU:
- return rewriteValueARM_OpARMLessThanU(v, config)
case OpLoad:
return rewriteValueARM_OpLoad(v, config)
case OpLrot16:
@@ -354,74 +558,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValueARM_OpLsh8x8(v, config)
- case OpARMMOVBUload:
- return rewriteValueARM_OpARMMOVBUload(v, config)
- case OpARMMOVBUreg:
- return rewriteValueARM_OpARMMOVBUreg(v, config)
- case OpARMMOVBload:
- return rewriteValueARM_OpARMMOVBload(v, config)
- case OpARMMOVBreg:
- return rewriteValueARM_OpARMMOVBreg(v, config)
- case OpARMMOVBstore:
- return rewriteValueARM_OpARMMOVBstore(v, config)
- case OpARMMOVDload:
- return rewriteValueARM_OpARMMOVDload(v, config)
- case OpARMMOVDstore:
- return rewriteValueARM_OpARMMOVDstore(v, config)
- case OpARMMOVFload:
- return rewriteValueARM_OpARMMOVFload(v, config)
- case OpARMMOVFstore:
- return rewriteValueARM_OpARMMOVFstore(v, config)
- case OpARMMOVHUload:
- return rewriteValueARM_OpARMMOVHUload(v, config)
- case OpARMMOVHUreg:
- return rewriteValueARM_OpARMMOVHUreg(v, config)
- case OpARMMOVHload:
- return rewriteValueARM_OpARMMOVHload(v, config)
- case OpARMMOVHreg:
- return rewriteValueARM_OpARMMOVHreg(v, config)
- case OpARMMOVHstore:
- return rewriteValueARM_OpARMMOVHstore(v, config)
- case OpARMMOVWload:
- return rewriteValueARM_OpARMMOVWload(v, config)
- case OpARMMOVWloadidx:
- return rewriteValueARM_OpARMMOVWloadidx(v, config)
- case OpARMMOVWloadshiftLL:
- return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
- case OpARMMOVWloadshiftRA:
- return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
- case OpARMMOVWloadshiftRL:
- return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
- case OpARMMOVWreg:
- return rewriteValueARM_OpARMMOVWreg(v, config)
- case OpARMMOVWstore:
- return rewriteValueARM_OpARMMOVWstore(v, config)
- case OpARMMOVWstoreidx:
- return rewriteValueARM_OpARMMOVWstoreidx(v, config)
- case OpARMMOVWstoreshiftLL:
- return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
- case OpARMMOVWstoreshiftRA:
- return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
- case OpARMMOVWstoreshiftRL:
- return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
- case OpARMMUL:
- return rewriteValueARM_OpARMMUL(v, config)
- case OpARMMULA:
- return rewriteValueARM_OpARMMULA(v, config)
- case OpARMMVN:
- return rewriteValueARM_OpARMMVN(v, config)
- case OpARMMVNshiftLL:
- return rewriteValueARM_OpARMMVNshiftLL(v, config)
- case OpARMMVNshiftLLreg:
- return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
- case OpARMMVNshiftRA:
- return rewriteValueARM_OpARMMVNshiftRA(v, config)
- case OpARMMVNshiftRAreg:
- return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
- case OpARMMVNshiftRL:
- return rewriteValueARM_OpARMMVNshiftRL(v, config)
- case OpARMMVNshiftRLreg:
- return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
case OpMod16:
return rewriteValueARM_OpMod16(v, config)
case OpMod16u:
@@ -476,24 +612,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpNilCheck(v, config)
case OpNot:
return rewriteValueARM_OpNot(v, config)
- case OpARMNotEqual:
- return rewriteValueARM_OpARMNotEqual(v, config)
- case OpARMOR:
- return rewriteValueARM_OpARMOR(v, config)
- case OpARMORconst:
- return rewriteValueARM_OpARMORconst(v, config)
- case OpARMORshiftLL:
- return rewriteValueARM_OpARMORshiftLL(v, config)
- case OpARMORshiftLLreg:
- return rewriteValueARM_OpARMORshiftLLreg(v, config)
- case OpARMORshiftRA:
- return rewriteValueARM_OpARMORshiftRA(v, config)
- case OpARMORshiftRAreg:
- return rewriteValueARM_OpARMORshiftRAreg(v, config)
- case OpARMORshiftRL:
- return rewriteValueARM_OpARMORshiftRL(v, config)
- case OpARMORshiftRLreg:
- return rewriteValueARM_OpARMORshiftRLreg(v, config)
case OpOffPtr:
return rewriteValueARM_OpOffPtr(v, config)
case OpOr16:
@@ -504,48 +622,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpOr8(v, config)
case OpOrB:
return rewriteValueARM_OpOrB(v, config)
- case OpARMRSB:
- return rewriteValueARM_OpARMRSB(v, config)
- case OpARMRSBSshiftLL:
- return rewriteValueARM_OpARMRSBSshiftLL(v, config)
- case OpARMRSBSshiftLLreg:
- return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
- case OpARMRSBSshiftRA:
- return rewriteValueARM_OpARMRSBSshiftRA(v, config)
- case OpARMRSBSshiftRAreg:
- return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
- case OpARMRSBSshiftRL:
- return rewriteValueARM_OpARMRSBSshiftRL(v, config)
- case OpARMRSBSshiftRLreg:
- return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
- case OpARMRSBconst:
- return rewriteValueARM_OpARMRSBconst(v, config)
- case OpARMRSBshiftLL:
- return rewriteValueARM_OpARMRSBshiftLL(v, config)
- case OpARMRSBshiftLLreg:
- return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
- case OpARMRSBshiftRA:
- return rewriteValueARM_OpARMRSBshiftRA(v, config)
- case OpARMRSBshiftRAreg:
- return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
- case OpARMRSBshiftRL:
- return rewriteValueARM_OpARMRSBshiftRL(v, config)
- case OpARMRSBshiftRLreg:
- return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
- case OpARMRSCconst:
- return rewriteValueARM_OpARMRSCconst(v, config)
- case OpARMRSCshiftLL:
- return rewriteValueARM_OpARMRSCshiftLL(v, config)
- case OpARMRSCshiftLLreg:
- return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
- case OpARMRSCshiftRA:
- return rewriteValueARM_OpARMRSCshiftRA(v, config)
- case OpARMRSCshiftRAreg:
- return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
- case OpARMRSCshiftRL:
- return rewriteValueARM_OpARMRSCshiftRL(v, config)
- case OpARMRSCshiftRLreg:
- return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
case OpRsh16Ux16:
return rewriteValueARM_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
@@ -594,66 +670,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpRsh8x64(v, config)
case OpRsh8x8:
return rewriteValueARM_OpRsh8x8(v, config)
- case OpARMSBC:
- return rewriteValueARM_OpARMSBC(v, config)
- case OpARMSBCconst:
- return rewriteValueARM_OpARMSBCconst(v, config)
- case OpARMSBCshiftLL:
- return rewriteValueARM_OpARMSBCshiftLL(v, config)
- case OpARMSBCshiftLLreg:
- return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
- case OpARMSBCshiftRA:
- return rewriteValueARM_OpARMSBCshiftRA(v, config)
- case OpARMSBCshiftRAreg:
- return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
- case OpARMSBCshiftRL:
- return rewriteValueARM_OpARMSBCshiftRL(v, config)
- case OpARMSBCshiftRLreg:
- return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
- case OpARMSLL:
- return rewriteValueARM_OpARMSLL(v, config)
- case OpARMSLLconst:
- return rewriteValueARM_OpARMSLLconst(v, config)
- case OpARMSRA:
- return rewriteValueARM_OpARMSRA(v, config)
- case OpARMSRAcond:
- return rewriteValueARM_OpARMSRAcond(v, config)
- case OpARMSRAconst:
- return rewriteValueARM_OpARMSRAconst(v, config)
- case OpARMSRL:
- return rewriteValueARM_OpARMSRL(v, config)
- case OpARMSRLconst:
- return rewriteValueARM_OpARMSRLconst(v, config)
- case OpARMSUB:
- return rewriteValueARM_OpARMSUB(v, config)
- case OpARMSUBS:
- return rewriteValueARM_OpARMSUBS(v, config)
- case OpARMSUBSshiftLL:
- return rewriteValueARM_OpARMSUBSshiftLL(v, config)
- case OpARMSUBSshiftLLreg:
- return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
- case OpARMSUBSshiftRA:
- return rewriteValueARM_OpARMSUBSshiftRA(v, config)
- case OpARMSUBSshiftRAreg:
- return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
- case OpARMSUBSshiftRL:
- return rewriteValueARM_OpARMSUBSshiftRL(v, config)
- case OpARMSUBSshiftRLreg:
- return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
- case OpARMSUBconst:
- return rewriteValueARM_OpARMSUBconst(v, config)
- case OpARMSUBshiftLL:
- return rewriteValueARM_OpARMSUBshiftLL(v, config)
- case OpARMSUBshiftLLreg:
- return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
- case OpARMSUBshiftRA:
- return rewriteValueARM_OpARMSUBshiftRA(v, config)
- case OpARMSUBshiftRAreg:
- return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
- case OpARMSUBshiftRL:
- return rewriteValueARM_OpARMSUBshiftRL(v, config)
- case OpARMSUBshiftRLreg:
- return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
case OpSignExt16to32:
return rewriteValueARM_OpSignExt16to32(v, config)
case OpSignExt8to16:
@@ -690,22 +706,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
return rewriteValueARM_OpTrunc32to16(v, config)
case OpTrunc32to8:
return rewriteValueARM_OpTrunc32to8(v, config)
- case OpARMXOR:
- return rewriteValueARM_OpARMXOR(v, config)
- case OpARMXORconst:
- return rewriteValueARM_OpARMXORconst(v, config)
- case OpARMXORshiftLL:
- return rewriteValueARM_OpARMXORshiftLL(v, config)
- case OpARMXORshiftLLreg:
- return rewriteValueARM_OpARMXORshiftLLreg(v, config)
- case OpARMXORshiftRA:
- return rewriteValueARM_OpARMXORshiftRA(v, config)
- case OpARMXORshiftRAreg:
- return rewriteValueARM_OpARMXORshiftRAreg(v, config)
- case OpARMXORshiftRL:
- return rewriteValueARM_OpARMXORshiftRL(v, config)
- case OpARMXORshiftRLreg:
- return rewriteValueARM_OpARMXORshiftRLreg(v, config)
case OpXor16:
return rewriteValueARM_OpXor16(v, config)
case OpXor32:
@@ -775,9 +775,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -794,9 +794,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -813,9 +813,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -832,9 +832,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -851,9 +851,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -870,9 +870,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1040,19 +1040,19 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1061,17 +1061,17 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1114,9 +1114,9 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1129,19 +1129,19 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1150,17 +1150,17 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADCconst x [int64(int32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1203,9 +1203,9 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1218,19 +1218,19 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1239,17 +1239,17 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1292,9 +1292,9 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
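
Editor's note: the AuxInt arithmetic in the ADCshift folds above differs by shift flavor, and the difference is load-bearing: the logical shifts (LL, RL) convert through uint32 so vacated bits are zero, while the arithmetic shift (RA) converts through int32 so the sign bit is replicated before widening back to int64. A compilable sketch (function names are illustrative):

package folddemo

// foldLL mirrors AuxInt = int64(uint32(c) << uint64(d)): zeros shift in from
// the right and the 32-bit result is zero-extended to 64 bits.
func foldLL(c, d int64) int64 { return int64(uint32(c) << uint64(d)) }

// foldRL mirrors int64(uint32(c) >> uint64(d)): a logical right shift.
func foldRL(c, d int64) int64 { return int64(uint32(c) >> uint64(d)) }

// foldRA mirrors int64(int32(c) >> uint64(d)): an arithmetic right shift that
// preserves the 32-bit sign, then sign-extends.
func foldRA(c, d int64) int64 { return int64(int32(c) >> uint64(d)) }
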
@@ -1345,9 +1345,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SLLconst [c] y) x)
@@ -1362,9 +1362,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SRLconst [c] y))
@@ -1379,9 +1379,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SRLconst [c] y) x)
@@ -1396,9 +1396,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SRAconst [c] y))
@@ -1413,9 +1413,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SRAconst [c] y) x)
@@ -1430,9 +1430,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SLL y z))
@@ -1654,9 +1654,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SLLconst [c] y) x)
@@ -1671,9 +1671,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SRLconst [c] y))
@@ -1688,9 +1688,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SRLconst [c] y) x)
@@ -1705,9 +1705,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SRAconst [c] y))
@@ -1722,9 +1722,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SRAconst [c] y) x)
@@ -1739,9 +1739,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SLL y z))
@@ -1855,18 +1855,18 @@ func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1874,16 +1874,16 @@ func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -1922,9 +1922,9 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -1936,18 +1936,18 @@ func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1955,16 +1955,16 @@ func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2003,9 +2003,9 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2017,18 +2017,18 @@ func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2036,16 +2036,16 @@ func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADDSconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2084,9 +2084,9 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2196,18 +2196,18 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADDconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2215,16 +2215,16 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ADDconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2263,9 +2263,9 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2277,18 +2277,18 @@ func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADDconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2296,16 +2296,16 @@ func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ADDconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2344,9 +2344,9 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2358,18 +2358,18 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADDconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2377,16 +2377,16 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ADDconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2425,9 +2425,9 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2477,9 +2477,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SLLconst [c] y) x)
@@ -2494,9 +2494,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SRLconst [c] y))
@@ -2511,9 +2511,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SRLconst [c] y) x)
@@ -2528,9 +2528,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SRAconst [c] y))
@@ -2545,9 +2545,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SRAconst [c] y) x)
@@ -2562,9 +2562,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SLL y z))
@@ -2706,12 +2706,12 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
if v_1.Op != OpARMMVNshiftLL {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (MVNshiftRL y [c]))
@@ -2723,12 +2723,12 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
if v_1.Op != OpARMMVNshiftRL {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (MVNshiftRA y [c]))
@@ -2740,12 +2740,12 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
if v_1.Op != OpARMMVNshiftRA {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2817,18 +2817,18 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ANDconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2836,32 +2836,32 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ANDconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftLL x y:(SLLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSLLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -2906,9 +2906,9 @@ func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
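
The MOVWconst cases in the hunks above fold the shift into the AuxInt at compile time, always narrowing to 32 bits first so the shift wraps exactly as it would on ARM. A sketch of that computation, assuming nothing beyond plain Go integer conversions:

    package main

    import "fmt"

    // foldShiftLL mirrors the AuxInt computation in the ANDshiftLL rule:
    // the constant is treated as a 32-bit value, shifted, then widened.
    func foldShiftLL(c, d int64) int64 {
        return int64(uint32(c) << uint64(d))
    }

    func main() {
        // (ANDshiftLL x (MOVWconst [0x0F]) [4]) folds to (ANDconst x [0xF0]).
        fmt.Printf("%#x\n", foldShiftLL(0x0F, 4)) // 0xf0
        // The uint32 conversion makes the shift wrap at 32 bits, matching ARM.
        fmt.Printf("%#x\n", foldShiftLL(0xFFFFFFFF, 4)) // 0xfffffff0
    }
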
@@ -2920,18 +2920,18 @@ func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ANDconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2939,32 +2939,32 @@ func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ANDconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftRA x y:(SRAconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRAconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3009,9 +3009,9 @@ func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
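
The RA variants fold with an arithmetic shift, so the 32-bit constant keeps its sign before being widened back to int64, while the RL variants shift in zeros. For example:

    package main

    import "fmt"

    // foldShiftRA mirrors the AuxInt computation in the ANDshiftRA rule:
    // shift the constant as a signed 32-bit value, then widen.
    func foldShiftRA(c, d int64) int64 {
        return int64(int32(c) >> uint64(d))
    }

    // foldShiftRL is the unsigned counterpart used by the RL rules.
    func foldShiftRL(c, d int64) int64 {
        return int64(uint32(c) >> uint64(d))
    }

    func main() {
        fmt.Println(foldShiftRA(-8, 1)) // -4: sign bit shifts in
        fmt.Println(foldShiftRL(-8, 1)) // 2147483644: zero shifts in
    }
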
@@ -3023,18 +3023,18 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ANDconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -3042,32 +3042,32 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ANDconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftRL x y:(SRLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3112,209 +3112,12 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMANDshiftRL)
- v.AddArg(x)
- v.AddArg(y)
v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32F x y)
- // cond:
- // result: (ADDF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDF)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32carry x y)
- // cond:
- // result: (ADDS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32withcarry x y c)
- // cond:
- // result: (ADC x y c)
- for {
- x := v.Args[0]
- y := v.Args[1]
- c := v.Args[2]
- v.reset(OpARMADC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(c)
- return true
- }
-}
-func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64F x y)
- // cond:
- // result: (ADDD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add8 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AddPtr x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Addr {sym} base)
- // cond:
- // result: (MOVWaddr {sym} base)
- for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(OpARMMOVWaddr)
- v.Aux = sym
- v.AddArg(base)
- return true
- }
-}
-func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And16 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And32 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And8 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AndB x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
v.AddArg(x)
v.AddArg(y)
return true
}
+ return false
}
func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
b := v.Block
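
The functions deleted in the hunk above (rewriteValueARM_OpAdd16 through rewriteValueARM_OpAndB) are not dropped: the file emits one function per opcode in sorted name order, and with names compared in their full Op.../OpARM... spelling the arch-specific functions all sort ahead of the generic ones, so these helpers presumably reappear further down the file. The ordering itself is easy to check:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        // Function order in this file follows the sorted opcode names.
        ops := []string{"OpAdd16", "OpARMBIC", "OpARMANDshiftRLreg", "OpClosureCall"}
        sort.Strings(ops)
        fmt.Println(ops)
        // [OpARMANDshiftRLreg OpARMBIC OpAdd16 OpClosureCall]
        // 'R' (0x52) < 'd' (0x64), so every OpARM* name precedes OpAdd16,
        // which is why the generic helpers move toward the end of the file.
    }
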
@@ -3346,9 +3149,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SRLconst [c] y))
@@ -3363,9 +3166,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SRAconst [c] y))
@@ -3380,9 +3183,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SLL y z))
@@ -3501,32 +3304,32 @@ func rewriteValueARM_OpARMBICshiftLL(v *Value, config *Config) bool {
// cond:
// result: (BICconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3551,9 +3354,9 @@ func rewriteValueARM_OpARMBICshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3565,32 +3368,32 @@ func rewriteValueARM_OpARMBICshiftRA(v *Value, config *Config) bool {
// cond:
// result: (BICconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3615,9 +3418,9 @@ func rewriteValueARM_OpARMBICshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3629,32 +3432,32 @@ func rewriteValueARM_OpARMBICshiftRL(v *Value, config *Config) bool {
// cond:
// result: (BICconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3679,9 +3482,9 @@ func rewriteValueARM_OpARMBICshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3693,11 +3496,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagEQ {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3720,11 +3523,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagLT_UGT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3747,11 +3550,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagGT_UGT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3760,17 +3563,17 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVWLSconst x flags [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMInvertFlags {
break
}
flags := v_1.Args[0]
- c := v.AuxInt
v.reset(OpARMCMOVWLSconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(flags)
- v.AuxInt = c
return true
}
return false
@@ -3782,11 +3585,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagEQ {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3795,11 +3598,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagLT_ULT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3822,11 +3625,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagGT_ULT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3849,17 +3652,17 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVWHSconst x flags [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMInvertFlags {
break
}
flags := v_1.Args[0]
- c := v.AuxInt
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(flags)
- v.AuxInt = c
return true
}
return false
@@ -3911,9 +3714,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SLLconst [c] y) x)
@@ -3929,9 +3732,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftLL, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
@@ -3947,9 +3750,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SRLconst [c] y) x)
@@ -3965,9 +3768,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftRL, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
@@ -3983,9 +3786,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SRAconst [c] y) x)
@@ -4001,9 +3804,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftRA, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
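
When the shifted constant sits on the left of a CMP, the hunks above move it to the right and wrap the result in InvertFlags: CMP(a,b) and CMP(b,a) report mirrored orderings, so the swap must be compensated. A sketch of the identity, assuming plain integer comparisons:

    package main

    import "fmt"

    // flags models the signed condition produced by CMP(a, b).
    func flags(a, b int32) string {
        switch {
        case a == b:
            return "EQ"
        case a < b:
            return "LT"
        default:
            return "GT"
        }
    }

    // invert mirrors LT and GT, which is what InvertFlags does after
    // the operands of a CMP have been swapped.
    func invert(f string) string {
        switch f {
        case "LT":
            return "GT"
        case "GT":
            return "LT"
        }
        return f
    }

    func main() {
        a, b := int32(3), int32(7)
        fmt.Println(flags(a, b))         // LT
        fmt.Println(invert(flags(b, a))) // LT: swap + invert preserves the result
    }
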
@@ -4166,12 +3969,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) == int32(y)) {
break
}
@@ -4182,12 +3985,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -4198,12 +4001,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -4214,12 +4017,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -4230,12 +4033,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -4246,11 +4049,11 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: 0xff < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVBUreg {
break
}
- c := v.AuxInt
if !(0xff < c) {
break
}
@@ -4261,11 +4064,11 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: 0xffff < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVHUreg {
break
}
- c := v.AuxInt
if !(0xffff < c) {
break
}
@@ -4276,12 +4079,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: 0 <= int32(m) && int32(m) < int32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMANDconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int32(m) && int32(m) < int32(n)) {
break
}
@@ -4292,12 +4095,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMSRLconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
break
}
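
The CMPconst-on-MOVWconst rules above decide the comparison entirely at compile time, encoding both the signed and the unsigned ordering in one of the Flag* constants. The same classification in plain Go:

    package main

    import "fmt"

    // classify mirrors the CMPconst-on-MOVWconst rules: the signed and
    // unsigned orderings of x and y are folded into one flag constant.
    func classify(x, y int64) string {
        sx, sy := int32(x), int32(y)
        ux, uy := uint32(x), uint32(y)
        switch {
        case sx == sy:
            return "FlagEQ"
        case sx < sy && ux < uy:
            return "FlagLT_ULT"
        case sx < sy && ux > uy:
            return "FlagLT_UGT"
        case sx > sy && ux < uy:
            return "FlagGT_ULT"
        default:
            return "FlagGT_UGT"
        }
    }

    func main() {
        fmt.Println(classify(3, 7))  // FlagLT_ULT
        fmt.Println(classify(-1, 1)) // FlagLT_UGT: negative, but large unsigned
        fmt.Println(classify(5, 5))  // FlagEQ
    }
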
@@ -4313,19 +4116,19 @@ func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
// cond:
// result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4334,16 +4137,16 @@ func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
// cond:
// result: (CMPconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -4384,9 +4187,9 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -4398,19 +4201,19 @@ func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
// cond:
// result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4419,16 +4222,16 @@ func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
// cond:
// result: (CMPconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -4469,9 +4272,9 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -4483,19 +4286,19 @@ func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
// cond:
// result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4504,16 +4307,16 @@ func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
// cond:
// result: (CMPconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -4554,306 +4357,13 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
}
-func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMCALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com16 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com32 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com8 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const16 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32F [val])
- // cond:
- // result: (MOVFconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVFconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64F [val])
- // cond:
- // result: (MOVDconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVDconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const8 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstBool [b])
- // cond:
- // result: (MOVWconst [b])
- for {
- b := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = b
- return true
- }
-}
-func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstNil)
- // cond:
- // result: (MOVWconst [0])
- for {
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Convert x mem)
- // cond:
- // result: (MOVWconvert x mem)
- for {
- x := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVWconvert)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32 x)
- // cond:
- // result: (MOVFW x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFW)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32U x)
- // cond:
- // result: (MOVFWU x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFWU)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64F x)
- // cond:
- // result: (MOVFD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Uto32F x)
- // cond:
- // result: (MOVWUF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWUF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Uto64F x)
- // cond:
- // result: (MOVWUD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWUD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to32F x)
- // cond:
- // result: (MOVWF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to64F x)
- // cond:
- // result: (MOVWD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32 x)
- // cond:
- // result: (MOVDW x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDW)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32F x)
- // cond:
- // result: (MOVDF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32U x)
- // cond:
- // result: (MOVDWU x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDWU)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4935,285 +4445,6 @@ func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (DeferCall [argwid] mem)
- // cond:
- // result: (CALLdefer [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpARMCALLdefer)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16 x y)
- // cond:
- // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16u x y)
- // cond:
- // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32 x y)
- // cond:
- // result: (DIV x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32F x y)
- // cond:
- // result: (DIVF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVF)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32u x y)
- // cond:
- // result: (DIVU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64F x y)
- // cond:
- // result: (DIVD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8 x y)
- // cond:
- // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8u x y)
- // cond:
- // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
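
The deleted Div16/Div8 helpers (re-emitted later in the file) widen to 32 bits before dividing: signed variants sign-extend and use DIV, unsigned variants zero-extend and use DIVU. Using the wrong extension would change the answer, as this sketch shows:

    package main

    import "fmt"

    func main() {
        x, y := int16(-6), int16(3)

        // Div16 lowers to DIV(SignExt16to32(x), SignExt16to32(y)).
        signed := int32(x) / int32(y)
        fmt.Println(signed) // -2

        // Zero-extending a negative int16 instead would divide a huge
        // unsigned value, which is why ZeroExt is reserved for Div16u.
        wrong := uint32(uint16(x)) / uint32(uint16(y))
        fmt.Println(wrong) // 21843, not -2
    }
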
-func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq16 x y)
- // cond:
- // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32 x y)
- // cond:
- // result: (Equal (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32F x y)
- // cond:
- // result: (Equal (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64F x y)
- // cond:
- // result: (Equal (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq8 x y)
- // cond:
- // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqB x y)
- // cond:
- // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMXORconst)
- v.AuxInt = 1
- v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqPtr x y)
- // cond:
- // result: (Equal (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
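
EqB above relies on booleans being materialized as 0 or 1: x == y is then 1 XOR (x XOR y), which is exactly the XORconst [1] (XOR x y) form the rule emits. A sketch:

    package main

    import "fmt"

    // eqb mirrors the EqB lowering: with booleans stored as 0 or 1,
    // x == y is 1 ^ (x ^ y), i.e. XORconst [1] (XOR x y).
    func eqb(x, y uint8) uint8 {
        return 1 ^ (x ^ y)
    }

    func main() {
        fmt.Println(eqb(1, 1)) // 1
        fmt.Println(eqb(1, 0)) // 0
        fmt.Println(eqb(0, 0)) // 1
    }
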
func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5292,336 +4523,6 @@ func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16 x y)
- // cond:
- // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16U x y)
- // cond:
- // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32 x y)
- // cond:
- // result: (GreaterEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32F x y)
- // cond:
- // result: (GreaterEqual (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32U x y)
- // cond:
- // result: (GreaterEqualU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64F x y)
- // cond:
- // result: (GreaterEqual (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8 x y)
- // cond:
- // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8U x y)
- // cond:
- // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetClosurePtr)
- // cond:
- // result: (LoweredGetClosurePtr)
- for {
- v.reset(OpARMLoweredGetClosurePtr)
- return true
- }
-}
-func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GoCall [argwid] mem)
- // cond:
- // result: (CALLgo [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpARMCALLgo)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16 x y)
- // cond:
- // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16U x y)
- // cond:
- // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32 x y)
- // cond:
- // result: (GreaterThan (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32F x y)
- // cond:
- // result: (GreaterThan (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32U x y)
- // cond:
- // result: (GreaterThanU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64F x y)
- // cond:
- // result: (GreaterThan (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8 x y)
- // cond:
- // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8U x y)
- // cond:
- // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5934,495 +4835,6 @@ func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16 x y)
- // cond:
- // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
- // cond:
- // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
- // cond:
- // result: (HMUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMHMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
- // cond:
- // result: (HMULU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMHMULU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
- // cond:
- // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 8
- return true
- }
-}
-func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
- // cond:
- // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 8
- return true
- }
-}
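
The Hmul16/Hmul8 helpers above compute a high multiply by widening, taking the full product, and shifting the upper half down (SRAconst for the signed forms, SRLconst for the unsigned ones). In plain Go:

    package main

    import "fmt"

    // hmul16 mirrors the Hmul16 lowering: sign-extend, take the full
    // 32-bit product, then arithmetic-shift the high half down.
    func hmul16(x, y int16) int16 {
        return int16((int32(x) * int32(y)) >> 16)
    }

    func main() {
        fmt.Println(hmul16(30000, 30000)) // 13732: high half of 900000000
        fmt.Println(hmul16(-30000, 2))    // -1: high half of a small negative
    }
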
-func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (InterCall [argwid] entry mem)
- // cond:
- // result: (CALLinter [argwid] entry mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMCALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
- // cond:
- // result: (LessThanU (CMP idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil ptr)
- // cond:
- // result: (NotEqual (CMPconst [0] ptr))
- for {
- ptr := v.Args[0]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(ptr)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
- // cond:
- // result: (LessEqualU (CMP idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16 x y)
- // cond:
- // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16U x y)
- // cond:
- // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32 x y)
- // cond:
- // result: (LessEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32F x y)
- // cond:
- // result: (GreaterEqual (CMPF y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32U x y)
- // cond:
- // result: (LessEqualU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64F x y)
- // cond:
- // result: (GreaterEqual (CMPD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8 x y)
- // cond:
- // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8U x y)
- // cond:
- // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16 x y)
- // cond:
- // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16U x y)
- // cond:
- // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32 x y)
- // cond:
- // result: (LessThan (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32F x y)
- // cond:
- // result: (GreaterThan (CMPF y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32U x y)
- // cond:
- // result: (LessThanU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64F x y)
- // cond:
- // result: (GreaterThan (CMPD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8 x y)
- // cond:
- // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8U x y)
- // cond:
- // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
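
The float comparisons above reverse the operands instead of adding a LessEqual flavor: Leq32F x y becomes GreaterEqual (CMPF y x). The two forms agree on every input, including NaN (both are false when either operand is unordered), which is what makes the swap sound; presumably GreaterEqual also maps onto the ARM condition with the desired NaN behavior. A quick check:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        nan := math.NaN()
        pairs := [][2]float64{{1, 2}, {2, 1}, {1, 1}, {nan, 1}, {1, nan}}
        for _, p := range pairs {
            x, y := p[0], p[1]
            // x <= y and y >= x agree for every float pair, including NaN,
            // so rewriting Leq32F x y as GreaterEqual (CMPF y x) is sound.
            fmt.Println(x <= y, y >= x)
        }
    }
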
func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -6735,502 +5147,6 @@ func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Load <t> ptr mem)
- // cond: t.IsBoolean()
- // result: (MOVBUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean()) {
- break
- }
- v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is8BitInt(t) && isSigned(t))
- // result: (MOVBload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is8BitInt(t) && isSigned(t)) {
- break
- }
- v.reset(OpARMMOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is8BitInt(t) && !isSigned(t))
- // result: (MOVBUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is8BitInt(t) && !isSigned(t)) {
- break
- }
- v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is16BitInt(t) && isSigned(t))
- // result: (MOVHload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t) && isSigned(t)) {
- break
- }
- v.reset(OpARMMOVHload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is16BitInt(t) && !isSigned(t))
- // result: (MOVHUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t) && !isSigned(t)) {
- break
- }
- v.reset(OpARMMOVHUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is32BitInt(t) || isPtr(t))
- // result: (MOVWload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpARMMOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVFload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
- break
- }
- v.reset(OpARMMOVFload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVDload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
- break
- }
- v.reset(OpARMMOVDload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
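
The deleted OpLoad helper picks the machine load purely from the value's type: booleans and unsigned bytes get MOVBUload, signed ones MOVBload, and so on up through floats. A condensed sketch of the same dispatch, with stand-in type predicates (the real ones are is8BitInt, isSigned, etc.):

    package main

    import "fmt"

    // typ is a stand-in for the compiler's Type; just enough fields to
    // drive the same dispatch the Load rules perform.
    type typ struct {
        size           int64
        signed, isBool bool
        isPtr, isFloat bool
    }

    func loadOp(t typ) string {
        switch {
        case t.isBool:
            return "MOVBUload"
        case t.isFloat && t.size == 4:
            return "MOVFload"
        case t.isFloat && t.size == 8:
            return "MOVDload"
        case t.size == 1 && t.signed:
            return "MOVBload"
        case t.size == 1:
            return "MOVBUload"
        case t.size == 2 && t.signed:
            return "MOVHload"
        case t.size == 2:
            return "MOVHUload"
        default: // 32-bit ints and pointers
            return "MOVWload"
        }
    }

    func main() {
        fmt.Println(loadOp(typ{size: 2, signed: true})) // MOVHload
        fmt.Println(loadOp(typ{size: 4, isPtr: true}))  // MOVWload
    }
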
-func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
- // cond:
- // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMOR)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
- v0.AddArg(x)
- v0.AuxInt = c & 15
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
- v1.AddArg(x)
- v1.AuxInt = 16 - c&15
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 x [c])
- // cond:
- // result: (SRRconst x [32-c&31])
- for {
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMSRRconst)
- v.AddArg(x)
- v.AuxInt = 32 - c&31
- return true
- }
-}
-func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
- // cond:
- // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMOR)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
- v0.AddArg(x)
- v0.AuxInt = c & 7
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
- v1.AddArg(x)
- v1.AuxInt = 8 - c&7
- v.AddArg(v1)
- return true
- }
-}
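
ARM has a native 32-bit rotate (the SRRconst used by Lrot32 above) but nothing narrower, so Lrot16 and Lrot8 expand to an OR of complementary shifts. Note that 16-c&15 parses as 16-(c&15) in Go. A sketch of the 16-bit identity:

    package main

    import "fmt"

    // rotl16 mirrors the Lrot16 expansion: a left shift ORed with the
    // complementary right shift.
    func rotl16(x uint16, c uint) uint16 {
        k := c & 15
        return x<<k | x>>(16-k)
    }

    func main() {
        fmt.Printf("%#04x\n", rotl16(0x1234, 4))  // 0x2341
        fmt.Printf("%#04x\n", rotl16(0x1234, 16)) // 0x1234: count reduced mod 16
    }
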
-func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
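
The variable-count shifts above guard with CMPconst [256] and a CMOVWHSconst: ARM register shifts use only the low byte of the count, so counts 32..255 already produce 0 for SLL, and only counts of 256 and up need the conditional move to force the Go-defined zero result. A sketch of the combined semantics (the low-byte behavior is ARM's, stated here as an assumption about the target):

    package main

    import "fmt"

    // lsh mirrors the Lsh*x32 lowering: SLL computes the shift with the
    // count's low byte, and the CMOVWHSconst guard forces 0 at 256 and up.
    func lsh(x uint32, y uint32) uint32 {
        shifted := uint32(0)
        if b := y & 0xFF; b < 32 { // ARM shift-by-register semantics
            shifted = x << b
        }
        if y >= 256 { // CMPconst [256] + HS conditional move
            return 0
        }
        return shifted
    }

    func main() {
        fmt.Println(lsh(1, 4))   // 16
        fmt.Println(lsh(1, 40))  // 0: count in 32..255
        fmt.Println(lsh(1, 260)) // 0: guard; the low byte alone would say 1<<4
    }
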
-func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh16x64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
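
The Lsh*x64 rule pairs fold shifts by 64-bit constants at compile time: an in-range count becomes a SLLconst, while an out-of-range count shifts every bit out and becomes a zero constant. Sketch:

	func lsh16Const(x int16, c uint64) int16 {
		if c < 16 {
			return x << c // (SLLconst x [c])
		}
		return 0 // (Const16 [0])
	}
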
-func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh32x64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh8x64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
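
The variable-count shifts above all share one shape: perform the 32-bit shift, then use CMPconst [256] plus CMOVWHSconst to force the result to 0 when the zero-extended count is out of range. A hedged sketch of the semantics:

	func lsh32(x, y uint32) uint32 {
		if y >= 256 { // CMPconst [256]; HS (unsigned >=) selects the constant 0
			return 0
		}
		return x << y // SLL; counts 32..255 already produce 0
	}

Counts narrower than 32 bits are first widened with ZeroExt16to32/ZeroExt8to32, as the generated calls show; 8-bit counts skip the guard entirely since they can never reach 256.
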
func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -8413,17 +6329,17 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftLL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
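
Most of the remaining hunks in this file are mechanical reorderings rather than rule changes: each matcher now reads AuxInt before the Args, and each builder assigns v.AuxInt before its v.AddArg calls, e.g.

	// before: ptr := v_0.Args[0]; idx := v_0.Args[1]; c := v_0.AuxInt
	// after:  c := v_0.AuxInt; ptr := v_0.Args[0]; idx := v_0.Args[1]

so the emitted statements follow one fixed field order.
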
@@ -8439,17 +6355,17 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftRL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8465,17 +6381,17 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftRA {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8550,13 +6466,13 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSLLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8568,14 +6484,14 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSLLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8588,13 +6504,13 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSRLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8606,14 +6522,14 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSRLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8626,13 +6542,13 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSRAconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8644,14 +6560,14 @@ func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSRAconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8664,18 +6580,18 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftLL {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8689,13 +6605,13 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
// cond:
// result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(uint32(c) << uint64(d))
@@ -8712,18 +6628,18 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftRA {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8737,13 +6653,13 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
// cond:
// result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(int32(c) >> uint64(d))
@@ -8760,18 +6676,18 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftRL {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8785,13 +6701,13 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
// cond:
// result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(uint32(c) >> uint64(d))
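
When the index operand is a constant, the three shifted-load forms collapse to a plain MOVWload with the shift pre-applied to the offset. The offset arithmetic, as a sketch (the uint32/int32 conversions model ARM's 32-bit registers):

	func foldLL(c, d int64) int64 { return int64(uint32(c) << uint64(d)) } // MOVWloadshiftLL
	func foldRL(c, d int64) int64 { return int64(uint32(c) >> uint64(d)) } // MOVWloadshiftRL
	func foldRA(c, d int64) int64 { return int64(int32(c) >> uint64(d)) }  // MOVWloadshiftRA
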
@@ -8920,18 +6836,18 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftLL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -8948,18 +6864,18 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftRL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -8976,18 +6892,18 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
if v_0.Op != OpARMADDshiftRA {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9044,14 +6960,14 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSLLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9064,15 +6980,15 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSLLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9086,14 +7002,14 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSRLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9106,15 +7022,15 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSRLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9128,14 +7044,14 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_1.Op != OpARMSRAconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9148,15 +7064,15 @@ func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
if v_0.Op != OpARMSRAconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9170,13 +7086,13 @@ func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
// cond:
// result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9195,13 +7111,13 @@ func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
// cond:
// result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9220,13 +7136,13 @@ func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
// cond:
// result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9323,9 +7239,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
break
}
v.reset(OpARMADDshiftLL)
+ v.AuxInt = log2(c - 1)
v.AddArg(x)
v.AddArg(x)
- v.AuxInt = log2(c - 1)
return true
}
// match: (MUL x (MOVWconst [c]))
@@ -9342,9 +7258,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
break
}
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = log2(c + 1)
v.AddArg(x)
v.AddArg(x)
- v.AuxInt = log2(c + 1)
return true
}
// match: (MUL x (MOVWconst [c]))
@@ -9363,9 +7279,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 1
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 1
v.AddArg(v0)
return true
}
@@ -9385,9 +7301,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 2
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 2
v.AddArg(v0)
return true
}
@@ -9407,9 +7323,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 7)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = 3
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 3
v.AddArg(v0)
return true
}
@@ -9429,9 +7345,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 3
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 3
v.AddArg(v0)
return true
}
@@ -9517,9 +7433,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
break
}
v.reset(OpARMADDshiftLL)
+ v.AuxInt = log2(c - 1)
v.AddArg(x)
v.AddArg(x)
- v.AuxInt = log2(c - 1)
return true
}
// match: (MUL (MOVWconst [c]) x)
@@ -9536,9 +7452,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
break
}
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = log2(c + 1)
v.AddArg(x)
v.AddArg(x)
- v.AuxInt = log2(c + 1)
return true
}
// match: (MUL (MOVWconst [c]) x)
@@ -9557,9 +7473,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 1
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 1
v.AddArg(v0)
return true
}
@@ -9579,9 +7495,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 2
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 2
v.AddArg(v0)
return true
}
@@ -9601,9 +7517,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 7)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = 3
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 3
v.AddArg(v0)
return true
}
@@ -9623,9 +7539,9 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 3
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 3
v.AddArg(v0)
return true
}
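
The MUL rules above strength-reduce multiplication by small constants into shift-and-add forms. The identities, with illustrative Go (note << binds tighter than + and -):

	func mulBy9(x int32) int32 { return x + x<<3 } // c-1 a power of two: ADDshiftLL [log2(c-1)]
	func mulBy7(x int32) int32 { return x<<3 - x } // c+1 a power of two: RSBshiftLL [log2(c+1)]

Constants of the form 3*2^m, 5*2^m, 7*2^m, and 9*2^m build the small product first, then apply SLLconst [log2(c/3)] (and so on) to the result.
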
@@ -9744,9 +7660,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = log2(c - 1)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9767,9 +7683,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = log2(c + 1)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9792,9 +7708,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 1
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 1
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9818,9 +7734,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 2
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 2
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9844,9 +7760,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = 3
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 3
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9870,9 +7786,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 3
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 3
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9970,9 +7886,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = log2(c - 1)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9993,9 +7909,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = log2(c + 1)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -10018,9 +7934,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 1
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 1
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10044,9 +7960,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 2
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 2
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10070,9 +7986,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = 3
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 3
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10096,9 +8012,9 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 3
v1.AddArg(x)
v1.AddArg(x)
- v1.AuxInt = 3
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10153,8 +8069,8 @@ func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftLL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SRLconst [c] x))
@@ -10168,8 +8084,8 @@ func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftRL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SRAconst [c] x))
@@ -10183,8 +8099,8 @@ func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftRA)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SLL x y))
@@ -10241,12 +8157,12 @@ func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(uint32(c) << uint64(d))
return true
@@ -10267,8 +8183,8 @@ func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftLL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -10280,12 +8196,12 @@ func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [^int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(int32(c) >> uint64(d))
return true
@@ -10306,8 +8222,8 @@ func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftRA)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -10319,12 +8235,12 @@ func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
// cond:
// result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(uint32(c) >> uint64(d))
return true
@@ -10345,692 +8261,12 @@ func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftRL)
- v.AddArg(x)
v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16 x y)
- // cond:
- // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMOD)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16u x y)
- // cond:
- // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMODU)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32 x y)
- // cond:
- // result: (MOD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMOD)
v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32u x y)
- // cond:
- // result: (MODU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMODU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8 x y)
- // cond:
- // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMOD)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8u x y)
- // cond:
- // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMODU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
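
The deleted Mod rules widen sub-word operands and reuse the 32-bit MOD/MODU ops directly; a plain-Go sketch:

	func mod16(x, y int16) int16    { return int16(int32(x) % int32(y)) }    // MOD on SignExt16to32
	func mod16u(x, y uint16) uint16 { return uint16(uint32(x) % uint32(y)) } // MODU on ZeroExt16to32
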
-func rewriteValueARM_OpMove(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Move [s] _ _ mem)
- // cond: SizeAndAlign(s).Size() == 0
- // result: mem
- for {
- s := v.AuxInt
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 1
- // result: (MOVBstore dst (MOVBUload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 1) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
- // result: (MOVHstore dst (MOVHUload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
- break
- }
- v.reset(OpARMMOVHstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2
- // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AuxInt = 1
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 1
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
- // result: (MOVWstore dst (MOVWload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
- break
- }
- v.reset(OpARMMOVWstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
- // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
- break
- }
- v.reset(OpARMMOVHstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4
- // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AuxInt = 3
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 2
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AuxInt = 2
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v3.AuxInt = 1
- v3.AddArg(dst)
- v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v4.AuxInt = 1
- v4.AddArg(src)
- v4.AddArg(mem)
- v3.AddArg(v4)
- v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v5.AddArg(dst)
- v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v6.AddArg(src)
- v6.AddArg(mem)
- v5.AddArg(v6)
- v5.AddArg(mem)
- v3.AddArg(v5)
- v1.AddArg(v3)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 3
- // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 3) {
- break
- }
- v.reset(OpARMMOVBstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v1.AuxInt = 1
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v2.AuxInt = 1
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
- v3.AddArg(dst)
- v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
- v4.AddArg(src)
- v4.AddArg(mem)
- v3.AddArg(v4)
- v3.AddArg(mem)
- v1.AddArg(v3)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
- // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
- break
- }
- v.reset(OpARMDUFFCOPY)
- v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
- // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
- break
- }
- v.reset(OpARMLoweredMove)
- v.AuxInt = SizeAndAlign(s).Align()
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
- v0.AddArg(src)
- v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
- v.AddArg(v0)
- v.AddArg(mem)
return true
}
return false
}
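
The deleted Move lowering picks a strategy by size and alignment: nothing for size 0, single byte/half/word copies (or short sequences of them) up to 4 bytes, DUFFCOPY for 4-aligned sizes up to 512, and LoweredMove otherwise. The DUFFCOPY AuxInt is an entry offset into the unrolled copy routine; a sketch of that arithmetic (8 is presumably the code bytes emitted per copied word):

	func duffCopyEntry(size int64) int64 {
		return 8 * (128 - size/4) // 128-word device: jump past the words we don't need
	}
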
-func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul16 x y)
- // cond:
- // result: (MUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32 x y)
- // cond:
- // result: (MUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32F x y)
- // cond:
- // result: (MULF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULF)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32uhilo x y)
- // cond:
- // result: (MULLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64F x y)
- // cond:
- // result: (MULD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMULD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul8 x y)
- // cond:
- // result: (MUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg16 x)
- // cond:
- // result: (RSBconst [0] x)
- for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32 x)
- // cond:
- // result: (RSBconst [0] x)
- for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32F x)
- // cond:
- // result: (NEGF x)
- for {
- x := v.Args[0]
- v.reset(OpARMNEGF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64F x)
- // cond:
- // result: (NEGD x)
- for {
- x := v.Args[0]
- v.reset(OpARMNEGD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg8 x)
- // cond:
- // result: (RSBconst [0] x)
- for {
- x := v.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = 0
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq16 x y)
- // cond:
- // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32 x y)
- // cond:
- // result: (NotEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32F x y)
- // cond:
- // result: (NotEqual (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64F x y)
- // cond:
- // result: (NotEqual (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq8 x y)
- // cond:
- // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqB x y)
- // cond:
- // result: (XOR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMXOR)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqPtr x y)
- // cond:
- // result: (NotEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NilCheck ptr mem)
- // cond:
- // result: (LoweredNilCheck ptr mem)
- for {
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMLoweredNilCheck)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Not x)
- // cond:
- // result: (XORconst [1] x)
- for {
- x := v.Args[0]
- v.reset(OpARMXORconst)
- v.AuxInt = 1
- v.AddArg(x)
- return true
- }
-}
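
The deleted comparison rules lower to a CMP/CMPF/CMPD producing flags, consumed by the NotEqual pseudo-op; Neq16 and Neq8 zero-extend both sides, which is safe because equality is insensitive to the choice of extension. Booleans are 0/1 values, so NeqB and Not reduce to XORs (sketch):

	func notBool(b uint8) uint8 { return b ^ 1 } // Not  -> (XORconst [1] x)
	func neqB(x, y uint8) uint8 { return x ^ y } // NeqB -> (XOR x y)
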
func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -11154,9 +8390,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR (SLLconst [c] y) x)
@@ -11171,9 +8407,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR x (SRLconst [c] y))
@@ -11188,9 +8424,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMORshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR (SRLconst [c] y) x)
@@ -11205,9 +8441,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMORshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR x (SRAconst [c] y))
@@ -11222,9 +8458,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR (SRAconst [c] y) x)
@@ -11239,9 +8475,9 @@ func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (OR x (SLL y z))
@@ -11428,18 +8664,18 @@ func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ORconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -11447,32 +8683,32 @@ func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
// cond:
// result: (ORconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMORconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (ORshiftLL x y:(SLLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSLLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -11517,9 +8753,9 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -11531,18 +8767,18 @@ func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ORconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -11550,32 +8786,32 @@ func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
// cond:
// result: (ORconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMORconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ORshiftRA x y:(SRAconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRAconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -11620,9 +8856,9 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -11634,18 +8870,18 @@ func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ORconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -11653,32 +8889,32 @@ func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
// cond:
// result: (ORconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMORconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ORshiftRL x y:(SRLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -11723,101 +8959,12 @@ func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMORshiftRL)
- v.AddArg(x)
- v.AddArg(y)
v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OffPtr [off] ptr:(SP))
- // cond:
- // result: (MOVWaddr [off] ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- if ptr.Op != OpSP {
- break
- }
- v.reset(OpARMMOVWaddr)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
- // match: (OffPtr [off] ptr)
- // cond:
- // result: (ADDconst [off] ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(OpARMADDconst)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
-}
-func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or16 x y)
- // cond:
- // result: (OR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or32 x y)
- // cond:
- // result: (OR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or8 x y)
- // cond:
- // result: (OR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OrB x y)
- // cond:
- // result: (OR x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMOR)
v.AddArg(x)
v.AddArg(y)
return true
}
+ return false
}
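
The deleted OffPtr rules split on the base pointer: stack addresses fold into a single MOVWaddr, anything else becomes plain pointer arithmetic:

	(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
	(OffPtr [off] ptr)      -> (ADDconst [off] ptr)

and Or16/Or32/Or8/OrB all map onto the one 32-bit OR.
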
func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
b := v.Block
@@ -11864,9 +9011,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB (SLLconst [c] y) x)
@@ -11881,9 +9028,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB x (SRLconst [c] y))
@@ -11898,9 +9045,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB (SRLconst [c] y) x)
@@ -11915,9 +9062,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB x (SRAconst [c] y))
@@ -11932,9 +9079,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB (SRAconst [c] y) x)
@@ -11949,9 +9096,9 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (RSB x (SLL y z))
@@ -12077,18 +9224,18 @@ func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12096,16 +9243,16 @@ func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -12144,9 +9291,9 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12158,18 +9305,18 @@ func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12177,16 +9324,16 @@ func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBSconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -12225,9 +9372,9 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12239,18 +9386,18 @@ func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12258,16 +9405,16 @@ func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -12306,9 +9453,9 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12387,18 +9534,18 @@ func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SUBconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12406,32 +9553,32 @@ func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSBconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (RSBshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -12475,9 +9622,9 @@ func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12489,18 +9636,18 @@ func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SUBconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12508,32 +9655,32 @@ func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSBconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (RSBshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -12577,9 +9724,9 @@ func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -12591,18 +9738,18 @@ func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SUBconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMSUBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12610,32 +9757,32 @@ func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSBconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMRSBconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (RSBshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -12679,9 +9826,9 @@ func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
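
One non-obvious case in the RSBshift rules above: when the second operand is the first operand under the same constant shift (the c==d condition), the reverse subtraction cancels to zero, so the rule rewrites directly to (MOVWconst [0]).
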
@@ -12734,19 +9881,19 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -12755,17 +9902,17 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -12808,9 +9955,9 @@ func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMRSCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -12823,19 +9970,19 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -12844,17 +9991,17 @@ func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -12897,9 +10044,9 @@ func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMRSCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -12912,19 +10059,19 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -12933,17 +10080,17 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -12986,668 +10133,14 @@ func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMRSCshiftRL)
- v.AddArg(x)
- v.AddArg(y)
v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux16 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v3.AuxInt = 256
- v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v4.AddArg(y)
- v3.AddArg(v4)
- v.AddArg(v3)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux32 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v0.AddArg(y)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v2.AddArg(y)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = c + 16
- return true
- }
- // match: (Rsh16Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux8 x y)
- // cond:
- // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x16 x y)
- // cond:
- // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- return true
- }
-}
-func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x32 x y)
- // cond:
- // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v.AddArg(y)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = c + 16
- return true
- }
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
- v.AuxInt = 31
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x8 x y)
- // cond:
- // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux16 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux32 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SRLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(OpARMSRLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh32Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux8 x y)
- // cond:
- // result: (SRL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x16 x y)
- // cond:
- // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x32 x y)
- // cond:
- // result: (SRAcond x y (CMPconst [256] y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v.AddArg(x)
- v.AddArg(y)
- v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v0.AuxInt = 256
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SRAconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (SRAconst x [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpARMSRAconst)
v.AddArg(x)
- v.AuxInt = 31
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x8 x y)
- // cond:
- // result: (SRA x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux16 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v3.AuxInt = 256
- v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v4.AddArg(y)
- v3.AddArg(v4)
- v.AddArg(v3)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux32 x y)
- // cond:
- // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v0.AddArg(y)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v2.AddArg(y)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = c + 24
- return true
- }
- // match: (Rsh8Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux8 x y)
- // cond:
- // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x16 x y)
- // cond:
- // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- return true
- }
-}
-func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x32 x y)
- // cond:
- // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
v.AddArg(y)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = c + 24
- return true
- }
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
- v.AuxInt = 31
+ v.AddArg(flags)
return true
}
return false
}
-func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x8 x y)
- // cond:
- // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
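
The Rsh* lowerings deleted above are not lost; they are re-emitted later in the file under the generator's new full-opname ordering. The Rsh16Ux64 case among them shows the sub-word shift trick: for c < 16, left-shifting by 16 pushes any stale upper-half register bits out, and a single right shift by c+16 then performs the 16-bit logical shift. A rough model, assuming the upper half of the register may hold junk:

package main

import "fmt"

func main() {
	var x uint16 = 0x8421
	c := uint(3)
	reg := uint32(x) | 0xABCD0000 // register with stale bits above bit 15
	got := (reg << 16) >> (c + 16) // the SLLconst [16] / SRLconst [c+16] pair
	fmt.Printf("%#x %#x\n", got, uint32(x>>c)) // both 0x1084
}
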
func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -13698,9 +10191,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMSBCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13717,9 +10210,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMRSCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13736,9 +10229,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMSBCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13755,9 +10248,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMRSCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13774,9 +10267,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMSBCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13793,9 +10286,9 @@ func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMRSCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -13963,19 +10456,19 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -13984,17 +10477,17 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -14037,9 +10530,9 @@ func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMSBCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -14052,19 +10545,19 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -14073,17 +10566,17 @@ func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -14126,9 +10619,9 @@ func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMSBCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -14141,19 +10634,19 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMRSCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -14162,17 +10655,17 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMSBCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -14215,9 +10708,9 @@ func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMSBCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -14237,8 +10730,8 @@ func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMSLLconst)
- v.AddArg(x)
v.AuxInt = c & 31
+ v.AddArg(x)
return true
}
return false
@@ -14276,8 +10769,8 @@ func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = c & 31
+ v.AddArg(x)
return true
}
return false
@@ -14295,8 +10788,8 @@ func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
break
}
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
// match: (SRAcond x y (FlagLT_ULT))
@@ -14324,8 +10817,8 @@ func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
break
}
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
// match: (SRAcond x y (FlagGT_ULT))
@@ -14353,8 +10846,8 @@ func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
break
}
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
return false
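
The SRAcond rules above collapse the flags-conditional arithmetic shift to SRAconst [31] whenever the comparison outcome is known. A sketch of what SRAcond computes (our model, not a compiler identifier): ARM register shifts consume only the low byte of the count, and ASR by 32..255 already sign-fills, so the CMPconst [256] guard is all that remains to handle.

package main

import "fmt"

func rsh32x32(x int32, y uint32) int32 {
	if y >= 256 { // the CMPconst [256] guard; folded to SRAconst [31] above
		return x >> 31
	}
	return x >> (y & 255) // ASR by register uses the low byte; >=32 sign-fills
}

func main() {
	fmt.Println(rsh32x32(-8, 1))   // -4
	fmt.Println(rsh32x32(-8, 300)) // -1, all sign bits
}
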
@@ -14392,8 +10885,8 @@ func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpARMSRLconst)
- v.AddArg(x)
v.AuxInt = c & 31
+ v.AddArg(x)
return true
}
return false
@@ -14462,9 +10955,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB (SLLconst [c] y) x)
@@ -14479,9 +10972,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB x (SRLconst [c] y))
@@ -14496,9 +10989,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB (SRLconst [c] y) x)
@@ -14513,9 +11006,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB x (SRAconst [c] y))
@@ -14530,9 +11023,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB (SRAconst [c] y) x)
@@ -14547,9 +11040,9 @@ func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUB x (SLL y z))
@@ -14713,9 +11206,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS (SLLconst [c] y) x)
@@ -14730,9 +11223,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS x (SRLconst [c] y))
@@ -14747,9 +11240,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS (SRLconst [c] y) x)
@@ -14764,9 +11257,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS x (SRAconst [c] y))
@@ -14781,9 +11274,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS (SRAconst [c] y) x)
@@ -14798,9 +11291,9 @@ func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (SUBS x (SLL y z))
@@ -14914,18 +11407,18 @@ func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -14933,16 +11426,16 @@ func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -14981,9 +11474,9 @@ func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -14995,18 +11488,18 @@ func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15014,16 +11507,16 @@ func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBSconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -15062,9 +11555,9 @@ func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -15076,18 +11569,18 @@ func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15095,16 +11588,16 @@ func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -15143,9 +11636,9 @@ func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -15237,18 +11730,18 @@ func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
// cond:
// result: (RSBconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15256,32 +11749,32 @@ func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
// cond:
// result: (SUBconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (SUBshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -15325,9 +11818,9 @@ func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -15339,18 +11832,18 @@ func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
// cond:
// result: (RSBconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15358,32 +11851,32 @@ func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
// cond:
// result: (SUBconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (SUBshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -15427,9 +11920,9 @@ func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -15441,18 +11934,18 @@ func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
// cond:
// result: (RSBconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMRSBconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -15460,32 +11953,32 @@ func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
// cond:
// result: (SUBconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMSUBconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (SUBshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -15529,353 +12022,12 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMSUBshiftRL)
- v.AddArg(x)
- v.AddArg(y)
v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SignExt16to32 x)
- // cond:
- // result: (MOVHreg x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVHreg)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SignExt8to16 x)
- // cond:
- // result: (MOVBreg x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVBreg)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SignExt8to32 x)
- // cond:
- // result: (MOVBreg x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVBreg)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Signmask x)
- // cond:
- // result: (SRAconst x [31])
- for {
- x := v.Args[0]
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = 31
- return true
- }
-}
-func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sqrt x)
- // cond:
- // result: (SQRTD x)
- for {
- x := v.Args[0]
- v.reset(OpARMSQRTD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (StaticCall [argwid] {target} mem)
- // cond:
- // result: (CALLstatic [argwid] {target} mem)
- for {
- argwid := v.AuxInt
- target := v.Aux
- mem := v.Args[0]
- v.reset(OpARMCALLstatic)
- v.AuxInt = argwid
- v.Aux = target
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Store [1] ptr val mem)
- // cond:
- // result: (MOVBstore ptr val mem)
- for {
- if v.AuxInt != 1 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVBstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (Store [2] ptr val mem)
- // cond:
- // result: (MOVHstore ptr val mem)
- for {
- if v.AuxInt != 2 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMMOVHstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (Store [4] ptr val mem)
- // cond: !is32BitFloat(val.Type)
- // result: (MOVWstore ptr val mem)
- for {
- if v.AuxInt != 4 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(!is32BitFloat(val.Type)) {
- break
- }
- v.reset(OpARMMOVWstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (Store [4] ptr val mem)
- // cond: is32BitFloat(val.Type)
- // result: (MOVFstore ptr val mem)
- for {
- if v.AuxInt != 4 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32BitFloat(val.Type)) {
- break
- }
- v.reset(OpARMMOVFstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (Store [8] ptr val mem)
- // cond: is64BitFloat(val.Type)
- // result: (MOVDstore ptr val mem)
- for {
- if v.AuxInt != 8 {
- break
- }
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is64BitFloat(val.Type)) {
- break
- }
- v.reset(OpARMMOVDstore)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub16 x y)
- // cond:
- // result: (SUB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32 x y)
- // cond:
- // result: (SUB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32F x y)
- // cond:
- // result: (SUBF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUBF)
v.AddArg(x)
v.AddArg(y)
return true
}
-}
-func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32carry x y)
- // cond:
- // result: (SUBS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUBS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub32withcarry x y c)
- // cond:
- // result: (SBC x y c)
- for {
- x := v.Args[0]
- y := v.Args[1]
- c := v.Args[2]
- v.reset(OpARMSBC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(c)
- return true
- }
-}
-func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub64F x y)
- // cond:
- // result: (SUBD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUBD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Sub8 x y)
- // cond:
- // result: (SUB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SubPtr x y)
- // cond:
- // result: (SUB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSUB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Trunc16to8 x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Trunc32to16 x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Trunc32to8 x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
+ return false
}
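
The SUB and SUBS rules above come in mirrored pairs because subtraction is not commutative: when the shifted operand sits on the left of the SUB, the match must rewrite to a reverse subtract, hence the SUBshiftLL/RSBshiftLL pairing. A minimal sketch with our own helper names:

package main

import "fmt"

func subShiftLL(x, y uint32, c uint) uint32 { return x - y<<c } // x - (y << c)
func rsbShiftLL(x, y uint32, c uint) uint32 { return y<<c - x } // (y << c) - x

func main() {
	x, y, c := uint32(100), uint32(3), uint(2)
	fmt.Println(subShiftLL(x, y, c)) // (SUB x (SLLconst [c] y))  -> 88
	fmt.Println(rsbShiftLL(x, y, c)) // (SUB (SLLconst [c] y) x)  -> 4294967208
}
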
func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
b := v.Block
@@ -15922,9 +12074,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR (SLLconst [c] y) x)
@@ -15939,9 +12091,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR x (SRLconst [c] y))
@@ -15956,9 +12108,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR (SRLconst [c] y) x)
@@ -15973,9 +12125,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR x (SRAconst [c] y))
@@ -15990,9 +12142,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR (SRAconst [c] y) x)
@@ -16007,9 +12159,9 @@ func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (XOR x (SLL y z))
@@ -16183,18 +12335,18 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
// cond:
// result: (XORconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16202,32 +12354,32 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
// cond:
// result: (XORconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMXORconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (XORshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -16271,9 +12423,9 @@ func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -16285,18 +12437,18 @@ func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
// cond:
// result: (XORconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16304,32 +12456,32 @@ func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
// cond:
// result: (XORconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMXORconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (XORshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -16373,9 +12525,9 @@ func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -16387,18 +12539,18 @@ func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
// cond:
// result: (XORconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMXORconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16406,32 +12558,32 @@ func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
// cond:
// result: (XORconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMXORconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (XORshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -16475,13 +12627,3861 @@ func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
}
c := v_2.AuxInt
v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32carry x y)
+ // cond:
+ // result: (ADDS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32withcarry x y c)
+ // cond:
+ // result: (ADC x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(OpARMADC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
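
Add32carry and Add32withcarry above are the building blocks for multi-word arithmetic: ADDS yields the low word plus a carry-out, and ADC folds a carry-in back into the next word. A model of the pair (these helpers are ours; the real ops live in the ARM backend):

package main

import "fmt"

func add32carry(x, y uint32) (uint32, uint32) {
	s := uint64(x) + uint64(y)
	return uint32(s), uint32(s >> 32) // low word, carry-out
}

func add32withcarry(x, y, c uint32) uint32 { return x + y + c }

func main() {
	lo, carry := add32carry(0xFFFFFFFF, 2)
	fmt.Println(lo, carry)                     // 1 1
	fmt.Println(add32withcarry(10, 20, carry)) // 31
}
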
+func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMCALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVFconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVFconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVWconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert x mem)
+ // cond:
+ // result: (MOVWconvert x mem)
+ for {
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVWconvert)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (MOVFW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32U x)
+ // cond:
+ // result: (MOVFWU x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFWU)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (MOVFD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto32F x)
+ // cond:
+ // result: (MOVWUF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWUF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto64F x)
+ // cond:
+ // result: (MOVWUD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWUD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (MOVWF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (MOVWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (MOVDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (MOVDF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32U x)
+ // cond:
+ // result: (MOVDWU x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDWU)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARMCALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
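
Div16 above can widen safely because 16-bit signed division agrees with 32-bit division of the sign-extended operands, truncated back down; even the -32768 / -1 overflow case wraps to the same answer the narrow operation would give. A sketch with our own name:

package main

import "fmt"

func div16(x, y int16) int16 {
	return int16(int32(x) / int32(y)) // DIV (SignExt16to32 x) (SignExt16to32 y)
}

func main() {
	fmt.Println(div16(-7, 2))      // -3, truncates toward zero
	fmt.Println(div16(-32768, -1)) // -32768, wraps like the narrow op
}
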
+func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (DIVU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
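+// Note: comparisons are lowered in two steps: a CMP/CMPF/CMPD produces flags,
+// and a flag-reading pseudo-op such as Equal or GreaterThanU materializes the
+// boolean result. Sub-word operands are extended to 32 bits first so the
+// flags reflect the full Go-level values.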
+func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (Equal (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (Equal (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
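+// Note: booleans are materialized as 0 or 1, so EqB needs no flags at all:
+// it is computed as (x ^ y) ^ 1, and NeqB (further down) is a plain XOR.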
+func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (GreaterEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpARMLoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARMCALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (GreaterThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (GreaterThan (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (GreaterThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (GreaterThan (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMULU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMCALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil ptr)
+ // cond:
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v.Args[0]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
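+// Note: the floating-point Leq/Less rules reverse the CMPF/CMPD operands and
+// test GreaterEqual/GreaterThan instead, so that comparisons involving NaN
+// (unordered flags) evaluate to false, as Go requires.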
+func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (LessThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (GreaterThan (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (GreaterThan (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
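+// Note: Load dispatches on the static type of the value: booleans and
+// unsigned sub-word integers take the zero-extending MOVBU/MOVHU loads,
+// signed ones take the sign-extending MOVB/MOVH, and 32-bit integers,
+// pointers, and floats map to MOVW, MOVF, and MOVD respectively.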
+func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
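+// Note: ARM only has a rotate-right form, so Lrot32 by c is emitted as
+// SRRconst by 32-c&31 (a left rotate by 8 becomes a right rotate by 24),
+// while the sub-word Lrot16/Lrot8 rules synthesize the rotate from a
+// left-shift/right-shift pair ORed together.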
+func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AuxInt = c & 15
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AuxInt = 16 - c&15
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 x [c])
+ // cond:
+ // result: (SRRconst x [32-c&31])
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMSRRconst)
+ v.AuxInt = 32 - c&31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AuxInt = c & 7
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AuxInt = 8 - c&7
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
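+// Note: ARM register shifts use only the bottom byte of the shift count, so
+// for 16- and 32-bit counts the rules wrap the SLL/SRL in CMOVWHSconst with a
+// CMPconst [256] guard: counts of 256 or more conditionally overwrite the
+// result with 0, preserving Go's large-shift semantics. An 8-bit count can
+// never reach 256, so the *x8 rules skip the guard.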
+func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
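+// Note: shift counts that arrive as Const64 are resolved at compile time:
+// in-range counts become a single SLLconst, and counts at or above the
+// operand width fold directly to the zero constant Go semantics require.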
+func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MOD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
v.AddArg(x)
v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (MODU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
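+// Note: Move is lowered by size and alignment: sizes up to 4 bytes become
+// straight load/store pairs (word, halfword, or byte granularity as the
+// alignment allows), word-aligned copies up to 512 bytes jump into Duff's
+// device, and anything larger or misaligned falls back to the LoweredMove
+// loop. The DUFFCOPY AuxInt is the entry offset into the copy routine: each
+// 4-byte word costs 8 bytes of code and the routine holds 128 words, so e.g.
+// a 16-byte copy enters at 8*(128-4) = 992.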
+func rewriteValueARM_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 1
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 1
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AuxInt = 2
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AuxInt = 1
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v4.AuxInt = 1
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v5.AddArg(dst)
+ v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v6.AddArg(src)
+ v6.AddArg(mem)
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AuxInt = 1
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARMDUFFCOPY)
+ v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
+ // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
+ break
+ }
+ v.reset(OpARMLoweredMove)
+ v.AuxInt = SizeAndAlign(s).Align()
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32uhilo x y)
+ // cond:
+ // result: (MULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
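+// Note: ARM has no integer negate instruction; the Neg rules use the usual
+// idiom of a reverse subtract from zero (RSBconst [0] x computes 0 - x).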
+func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (NEGF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMNEGF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (NEGD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMNEGD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (NotEqual (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (NotEqual (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMLoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr:(SP))
+ // cond:
+ // result: (MOVWaddr [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v3.AuxInt = 256
+ v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
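+// Note: for constant right shifts of sub-word values, the rules fold the
+// widening into the shift pair: SLLconst [16] parks the 16-bit payload at
+// the top of the word, and a single SRLconst/SRAconst by c+16 then extends
+// and shifts in one go. For signed counts >= 16 the shift amount is clamped
+// to 31, which replicates the sign bit across the whole result.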
+func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c + 16
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 x y)
+ // cond:
+ // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 x y)
+ // cond:
+ // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c + 16
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 x y)
+ // cond:
+ // result: (SRL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 x y)
+ // cond:
+ // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 x y)
+ // cond:
+ // result: (SRAcond x y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v.AddArg(x)
+ v.AddArg(y)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 256
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 x y)
+ // cond:
+ // result: (SRA x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v3.AuxInt = 256
+ v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c + 24
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
return true
}
return false
}
+func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 x y)
+ // cond:
+ // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 x y)
+ // cond:
+ // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c + 24
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 x y)
+ // cond:
+ // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Signmask x)
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sqrt x)
+ // cond:
+ // result: (SQRTD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSQRTD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StaticCall [argwid] {target} mem)
+ // cond:
+ // result: (CALLstatic [argwid] {target} mem)
+ for {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.reset(OpARMCALLstatic)
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpStore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Store [1] ptr val mem)
+ // cond:
+ // result: (MOVBstore ptr val mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [2] ptr val mem)
+ // cond:
+ // result: (MOVHstore ptr val mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(!is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
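The Store lowering keys on two things visible in the rules above: the store width in AuxInt and, for widths 4 and 8, whether the value is a float, which routes the store to the FPU (MOVFstore/MOVDstore) instead of the integer unit. A condensed mirror of the dispatch (a hypothetical stand-in, not compiler code):

// storeOpFor shows the decision table the generated ARM rules implement.
func storeOpFor(size int64, isFloat32, isFloat64 bool) string {
	switch {
	case size == 1:
		return "MOVBstore"
	case size == 2:
		return "MOVHstore"
	case size == 4 && isFloat32:
		return "MOVFstore"
	case size == 4:
		return "MOVWstore"
	case size == 8 && isFloat64:
		return "MOVDstore"
	}
	return "" // no rule matches; rewriteValueARM_OpStore returns false
}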
+func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32F x y)
+ // cond:
+ // result: (SUBF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32carry x y)
+ // cond:
+ // result: (SUBS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32withcarry x y c)
+ // cond:
+ // result: (SBC x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(OpARMSBC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64F x y)
+ // cond:
+ // result: (SUBD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SubPtr x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16756,8 +16756,8 @@ func rewriteValueARM_OpZero(v *Value, config *Config) bool {
v.AuxInt = SizeAndAlign(s).Align()
v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
- v0.AddArg(ptr)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(ptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
v1.AuxInt = 0
@@ -16815,12 +16815,12 @@ func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
for {
x := v.Args[0]
v.reset(OpARMSRAconst)
+ v.AuxInt = 31
v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
+ v0.AuxInt = 1
v0.AddArg(x)
v0.AddArg(x)
- v0.AuxInt = 1
v.AddArg(v0)
- v.AuxInt = 31
return true
}
}
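Zeromask must produce 0 when x is 0 and all ones otherwise. RSBshiftRL x x [1] computes x>>1 - x, which is 0 for x == 0 and negative for every nonzero x (unsigned x>>1 < x), and SRAconst [31] then smears the sign bit across the word. The hunk only moves the AuxInt assignments; the trick itself is unchanged. A sketch:

// Illustrative model of (SRAconst (RSBshiftRL <int32> x x [1]) [31]).
func zeromask(x uint32) int32 {
	return int32(x>>1-x) >> 31 // 0 if x == 0, -1 otherwise
}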
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index ecde744d0f..e268f59079 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -10,6 +10,36 @@ func rewriteValueARM64(v *Value, config *Config) bool {
switch v.Op {
case OpARM64ADDconst:
return rewriteValueARM64_OpARM64ADDconst(v, config)
+ case OpARM64FMOVDload:
+ return rewriteValueARM64_OpARM64FMOVDload(v, config)
+ case OpARM64FMOVDstore:
+ return rewriteValueARM64_OpARM64FMOVDstore(v, config)
+ case OpARM64FMOVSload:
+ return rewriteValueARM64_OpARM64FMOVSload(v, config)
+ case OpARM64FMOVSstore:
+ return rewriteValueARM64_OpARM64FMOVSstore(v, config)
+ case OpARM64MOVBUload:
+ return rewriteValueARM64_OpARM64MOVBUload(v, config)
+ case OpARM64MOVBload:
+ return rewriteValueARM64_OpARM64MOVBload(v, config)
+ case OpARM64MOVBstore:
+ return rewriteValueARM64_OpARM64MOVBstore(v, config)
+ case OpARM64MOVDload:
+ return rewriteValueARM64_OpARM64MOVDload(v, config)
+ case OpARM64MOVDstore:
+ return rewriteValueARM64_OpARM64MOVDstore(v, config)
+ case OpARM64MOVHUload:
+ return rewriteValueARM64_OpARM64MOVHUload(v, config)
+ case OpARM64MOVHload:
+ return rewriteValueARM64_OpARM64MOVHload(v, config)
+ case OpARM64MOVHstore:
+ return rewriteValueARM64_OpARM64MOVHstore(v, config)
+ case OpARM64MOVWUload:
+ return rewriteValueARM64_OpARM64MOVWUload(v, config)
+ case OpARM64MOVWload:
+ return rewriteValueARM64_OpARM64MOVWload(v, config)
+ case OpARM64MOVWstore:
+ return rewriteValueARM64_OpARM64MOVWstore(v, config)
case OpAdd16:
return rewriteValueARM64_OpAdd16(v, config)
case OpAdd32:
@@ -132,14 +162,6 @@ func rewriteValueARM64(v *Value, config *Config) bool {
return rewriteValueARM64_OpEqB(v, config)
case OpEqPtr:
return rewriteValueARM64_OpEqPtr(v, config)
- case OpARM64FMOVDload:
- return rewriteValueARM64_OpARM64FMOVDload(v, config)
- case OpARM64FMOVDstore:
- return rewriteValueARM64_OpARM64FMOVDstore(v, config)
- case OpARM64FMOVSload:
- return rewriteValueARM64_OpARM64FMOVSload(v, config)
- case OpARM64FMOVSstore:
- return rewriteValueARM64_OpARM64FMOVSstore(v, config)
case OpGeq16:
return rewriteValueARM64_OpGeq16(v, config)
case OpGeq16U:
@@ -290,28 +312,6 @@ func rewriteValueARM64(v *Value, config *Config) bool {
return rewriteValueARM64_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValueARM64_OpLsh8x8(v, config)
- case OpARM64MOVBUload:
- return rewriteValueARM64_OpARM64MOVBUload(v, config)
- case OpARM64MOVBload:
- return rewriteValueARM64_OpARM64MOVBload(v, config)
- case OpARM64MOVBstore:
- return rewriteValueARM64_OpARM64MOVBstore(v, config)
- case OpARM64MOVDload:
- return rewriteValueARM64_OpARM64MOVDload(v, config)
- case OpARM64MOVDstore:
- return rewriteValueARM64_OpARM64MOVDstore(v, config)
- case OpARM64MOVHUload:
- return rewriteValueARM64_OpARM64MOVHUload(v, config)
- case OpARM64MOVHload:
- return rewriteValueARM64_OpARM64MOVHload(v, config)
- case OpARM64MOVHstore:
- return rewriteValueARM64_OpARM64MOVHstore(v, config)
- case OpARM64MOVWUload:
- return rewriteValueARM64_OpARM64MOVWUload(v, config)
- case OpARM64MOVWload:
- return rewriteValueARM64_OpARM64MOVWload(v, config)
- case OpARM64MOVWstore:
- return rewriteValueARM64_OpARM64MOVWstore(v, config)
case OpMod16:
return rewriteValueARM64_OpMod16(v, config)
case OpMod16u:
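The moved case blocks above follow from how the switch is now keyed: cases are emitted in sorted order of the full op name, and every "OpARM64..." string sorts ahead of generic names like "OpAdd16" or "OpEqPtr" ('R' < 'd' in ASCII), so the machine-op cases migrate to the top of the switch in one block. A quick check of the ordering claim:

package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"OpAdd16", "OpEqPtr", "OpARM64MOVBUload", "OpARM64FMOVDload"}
	sort.Strings(names)
	fmt.Println(names) // [OpARM64FMOVDload OpARM64MOVBUload OpAdd16 OpEqPtr]
}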
@@ -542,6 +542,765 @@ func rewriteValueARM64_OpARM64ADDconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
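Every load/store rule in this block follows the same two-step pattern: fold a constant address add into the instruction's immediate (off1+off2), and fold a symbol address (MOVDaddr) into the instruction when canMergeSym allows the two aux symbols to be combined. A hypothetical mini-model of the addressing fold (the names are illustrative, not the compiler's):

// addr is a stand-in for a base+offset(+symbol) addressing mode.
type addr struct {
	base string // register holding the base pointer
	off  int64  // immediate offset folded into the instruction
	sym  string // optional symbol; empty when none
}

// foldADDconst mirrors (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// -> (FMOVDload [off1+off2] {sym} ptr mem): the add disappears.
func foldADDconst(off1 int64, sym string, off2 int64, ptr string) addr {
	return addr{base: ptr, off: off1 + off2, sym: sym}
}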
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -750,12 +1509,12 @@ func rewriteValueARM64_OpAvg64u(v *Value, config *Config) bool {
v.reset(OpARM64ADD)
v0 := b.NewValue0(v.Line, OpARM64ADD, t)
v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
- v1.AddArg(x)
v1.AuxInt = 1
+ v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpARM64SRLconst, t)
- v2.AddArg(y)
v2.AuxInt = 1
+ v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Line, OpARM64AND, t)
@@ -1469,210 +2228,6 @@ func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
return true
}
}
-func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (FMOVDload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64FMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64FMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64FMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64FMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (FMOVSload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64FMOVSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64FMOVSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64FMOVSstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64FMOVSstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -2081,6 +2636,7 @@ func rewriteValueARM64_OpHmul16(v *Value, config *Config) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
+ v.AuxInt = 16
v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt32())
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
@@ -2089,7 +2645,6 @@ func rewriteValueARM64_OpHmul16(v *Value, config *Config) bool {
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v.AuxInt = 16
return true
}
}
@@ -2103,6 +2658,7 @@ func rewriteValueARM64_OpHmul16u(v *Value, config *Config) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRLconst)
+ v.AuxInt = 16
v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt32())
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
@@ -2111,7 +2667,6 @@ func rewriteValueARM64_OpHmul16u(v *Value, config *Config) bool {
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v.AuxInt = 16
return true
}
}
@@ -2125,11 +2680,11 @@ func rewriteValueARM64_OpHmul32(v *Value, config *Config) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
+ v.AuxInt = 32
v0 := b.NewValue0(v.Line, OpARM64MULL, config.fe.TypeInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v.AuxInt = 32
return true
}
}
@@ -2143,11 +2698,11 @@ func rewriteValueARM64_OpHmul32u(v *Value, config *Config) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
+ v.AuxInt = 32
v0 := b.NewValue0(v.Line, OpARM64UMULL, config.fe.TypeUInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v.AuxInt = 32
return true
}
}
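The Hmul hunks here and below change nothing but ordering: the regenerated code writes v.AuxInt immediately after v.reset, before the AddArg calls, mirroring the rule syntax (opcode, then [auxint], then args). Since AuxInt and Args are independent fields of a Value, both orders build the identical value; a minimal stand-in:

// value models just the two fields involved (illustrative).
type value struct {
	AuxInt int64
	Args   []*value
}

func build(v, arg *value) {
	v.AuxInt = 32                // new order: aux first...
	v.Args = append(v.Args, arg) // ...then args; same final state either way
}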
@@ -2191,6 +2746,7 @@ func rewriteValueARM64_OpHmul8(v *Value, config *Config) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
+ v.AuxInt = 8
v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt16())
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
@@ -2199,7 +2755,6 @@ func rewriteValueARM64_OpHmul8(v *Value, config *Config) bool {
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v.AuxInt = 8
return true
}
}
@@ -2213,6 +2768,7 @@ func rewriteValueARM64_OpHmul8u(v *Value, config *Config) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRLconst)
+ v.AuxInt = 8
v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt16())
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
@@ -2221,7 +2777,6 @@ func rewriteValueARM64_OpHmul8u(v *Value, config *Config) bool {
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v.AuxInt = 8
return true
}
}
@@ -2827,18 +3382,18 @@ func rewriteValueARM64_OpLrot16(v *Value, config *Config) bool {
// result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
for {
t := v.Type
- x := v.Args[0]
c := v.AuxInt
+ x := v.Args[0]
v.reset(OpARM64OR)
v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
- v0.AddArg(x)
v0.AuxInt = c & 15
+ v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+ v1.AuxInt = 16 - c&15
v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v1.AuxInt = 16 - c&15
v.AddArg(v1)
return true
}
@@ -2850,11 +3405,11 @@ func rewriteValueARM64_OpLrot32(v *Value, config *Config) bool {
// cond:
// result: (RORWconst x [32-c&31])
for {
- x := v.Args[0]
c := v.AuxInt
+ x := v.Args[0]
v.reset(OpARM64RORWconst)
- v.AddArg(x)
v.AuxInt = 32 - c&31
+ v.AddArg(x)
return true
}
}
@@ -2865,11 +3420,11 @@ func rewriteValueARM64_OpLrot64(v *Value, config *Config) bool {
// cond:
// result: (RORconst x [64-c&63])
for {
- x := v.Args[0]
c := v.AuxInt
+ x := v.Args[0]
v.reset(OpARM64RORconst)
- v.AddArg(x)
v.AuxInt = 64 - c&63
+ v.AddArg(x)
return true
}
}
@@ -2881,18 +3436,18 @@ func rewriteValueARM64_OpLrot8(v *Value, config *Config) bool {
// result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> (ZeroExt8to64 x) [8-c&7]))
for {
t := v.Type
- x := v.Args[0]
c := v.AuxInt
+ x := v.Args[0]
v.reset(OpARM64OR)
v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
- v0.AddArg(x)
v0.AuxInt = c & 7
+ v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+ v1.AuxInt = 8 - c&7
v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v1.AuxInt = 8 - c&7
v.AddArg(v1)
return true
}
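ARM64 has only a rotate-right instruction, so the Lrot rules rotate left by c by rotating right by width-c (masked), and the 8/16-bit variants, which have no native rotate at all, assemble one from a left shift of the low bits OR a right shift of the zero-extended value. The 16-bit identity, as a sketch (only the low 16 bits of the result are meaningful, exactly as in the rule):

// rotl16 mirrors (OR (SLLconst x [c&15]) (SRLconst (ZeroExt16to64 x) [16-c&15])).
func rotl16(x uint16, c uint) uint32 {
	t := uint32(x)
	return t<<(c&15) | t>>(16-c&15) // Go parses 16-c&15 as 16-(c&15), like the rule
}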
@@ -2972,8 +3527,8 @@ func rewriteValueARM64_OpLsh16x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SLLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Lsh16x64 _ (MOVDconst [c]))
@@ -3118,8 +3673,8 @@ func rewriteValueARM64_OpLsh32x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SLLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Lsh32x64 _ (MOVDconst [c]))
@@ -3264,8 +3819,8 @@ func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SLLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Lsh64x64 _ (MOVDconst [c]))
@@ -3410,8 +3965,8 @@ func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SLLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Lsh8x64 _ (MOVDconst [c]))
@@ -3481,561 +4036,6 @@ func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
return true
}
}
-func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVBUload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVBload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVBstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVDload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVDstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVHUload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVHload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVHload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVHload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVHstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVWUload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVWUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVWUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVWload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVWstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4647,8 +4647,8 @@ func rewriteValueARM64_OpMove(v *Value, config *Config) bool {
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Line, OpARM64ADDconst, src.Type)
- v0.AddArg(src)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
return true
@@ -5179,10 +5179,10 @@ func rewriteValueARM64_OpRsh16Ux64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRLconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh16Ux64 _ (MOVDconst [c]))
@@ -5333,10 +5333,10 @@ func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh16x64 x (MOVDconst [c]))
@@ -5353,10 +5353,10 @@ func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = 63
return true
}
// match: (Rsh16x64 x y)
@@ -5491,10 +5491,10 @@ func rewriteValueARM64_OpRsh32Ux64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRLconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh32Ux64 _ (MOVDconst [c]))
@@ -5645,10 +5645,10 @@ func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh32x64 x (MOVDconst [c]))
@@ -5665,10 +5665,10 @@ func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = 63
return true
}
// match: (Rsh32x64 x y)
@@ -5799,8 +5799,8 @@ func rewriteValueARM64_OpRsh64Ux64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Rsh64Ux64 _ (MOVDconst [c]))
@@ -5943,8 +5943,8 @@ func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRAconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Rsh64x64 x (MOVDconst [c]))
@@ -5961,8 +5961,8 @@ func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRAconst)
- v.AddArg(x)
v.AuxInt = 63
+ v.AddArg(x)
return true
}
// match: (Rsh64x64 x y)
@@ -6093,10 +6093,10 @@ func rewriteValueARM64_OpRsh8Ux64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRLconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh8Ux64 _ (MOVDconst [c]))
@@ -6247,10 +6247,10 @@ func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh8x64 x (MOVDconst [c]))
@@ -6267,10 +6267,10 @@ func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = 63
return true
}
// match: (Rsh8x64 x y)
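The ARM64 hunks above are pure reorderings: rulegen now emits AuxInt/Aux assignments immediately after v.reset, before any AddArg calls, so a rebuilt value's scalar fields are always written before its arguments. Semantically the two orders are equivalent; the change only normalizes emission order. Below is a minimal, self-contained sketch of the match-and-rewrite shape these generated functions share, using a toy Value type and stand-in op names rather than the compiler's actual ssa package:

package main

import "fmt"

type Op int

const (
	OpLoad Op = iota
	OpAddConst
	OpVar
)

// Value is a toy stand-in for ssa.Value, just enough for the sketch.
type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

func (v *Value) reset(op Op)     { v.Op = op; v.Args = nil }
func (v *Value) AddArg(w *Value) { v.Args = append(v.Args, w) }

// rewriteLoad mirrors one generated rule (the dispatch switch is assumed
// to have already checked v.Op):
//   match:  (Load [off1] (AddConst [off2] ptr) mem)
//   result: (Load [off1+off2] ptr mem)
func rewriteLoad(v *Value) bool {
	for {
		off1 := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAddConst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		v.reset(OpLoad)
		v.AuxInt = off1 + off2 // scalar fields first, as rulegen now emits...
		v.AddArg(ptr)          // ...then arguments, in rule order
		v.AddArg(mem)
		return true
	}
	return false
}

func main() {
	ptr := &Value{Op: OpVar}
	mem := &Value{Op: OpVar}
	add := &Value{Op: OpAddConst, AuxInt: 8, Args: []*Value{ptr}}
	load := &Value{Op: OpLoad, AuxInt: 4, Args: []*Value{add, mem}}
	fmt.Println(rewriteLoad(load), load.AuxInt) // true 12
}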
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 96b5759531..d30454239f 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -8,8 +8,6 @@ import "math"
var _ = math.MinInt8 // in case not otherwise used
func rewriteValuePPC64(v *Value, config *Config) bool {
switch v.Op {
- case OpPPC64ADD:
- return rewriteValuePPC64_OpPPC64ADD(v, config)
case OpAdd16:
return rewriteValuePPC64_OpAdd16(v, config)
case OpAdd32:
@@ -154,22 +152,6 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
return rewriteValuePPC64_OpLess8U(v, config)
case OpLoad:
return rewriteValuePPC64_OpLoad(v, config)
- case OpPPC64MOVBstore:
- return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
- case OpPPC64MOVBstorezero:
- return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
- case OpPPC64MOVDstore:
- return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
- case OpPPC64MOVDstorezero:
- return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
- case OpPPC64MOVHstore:
- return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
- case OpPPC64MOVHstorezero:
- return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
- case OpPPC64MOVWstore:
- return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
- case OpPPC64MOVWstorezero:
- return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
case OpMove:
return rewriteValuePPC64_OpMove(v, config)
case OpMul16:
@@ -216,6 +198,24 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
return rewriteValuePPC64_OpOr64(v, config)
case OpOr8:
return rewriteValuePPC64_OpOr8(v, config)
+ case OpPPC64ADD:
+ return rewriteValuePPC64_OpPPC64ADD(v, config)
+ case OpPPC64MOVBstore:
+ return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
+ case OpPPC64MOVBstorezero:
+ return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
+ case OpPPC64MOVDstore:
+ return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
+ case OpPPC64MOVDstorezero:
+ return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
+ case OpPPC64MOVHstore:
+ return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
+ case OpPPC64MOVHstorezero:
+ return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
+ case OpPPC64MOVWstore:
+ return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
+ case OpPPC64MOVWstorezero:
+ return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
case OpSignExt16to32:
return rewriteValuePPC64_OpSignExt16to32(v, config)
case OpSignExt16to64:
@@ -283,41 +283,6 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
}
return false
}
-func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ADD (MOVDconst [c]) x)
- // cond:
- // result: (ADDconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64MOVDconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpPPC64ADDconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ADD x (MOVDconst [c]))
- // cond:
- // result: (ADDconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpPPC64ADDconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- return false
-}
func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1691,330 +1656,6 @@ func rewriteValuePPC64_OpLoad(v *Value, config *Config) bool {
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVBstore [off1+off2] {sym} x val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
- // result: (MOVBstorezero [off] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
- break
- }
- v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVBstorezero [off1+off2] {sym} x mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- mem := v.Args[1]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVDstore [off1+off2] {sym} x val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
- // result: (MOVDstorezero [off] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
- break
- }
- v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVDstorezero [off1+off2] {sym} x mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- mem := v.Args[1]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVHstore [off1+off2] {sym} x val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
- // result: (MOVHstorezero [off] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
- break
- }
- v.reset(OpPPC64MOVHstorezero)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVHstorezero [off1+off2] {sym} x mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- mem := v.Args[1]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVHstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVWstore [off1+off2] {sym} x val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
- // result: (MOVWstorezero [off] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
- break
- }
- v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVWstorezero [off1+off2] {sym} x mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- mem := v.Args[1]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -2347,8 +1988,8 @@ func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Line, OpPPC64ADDconst, src.Type)
- v0.AddArg(src)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
return true
@@ -2725,6 +2366,365 @@ func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool {
return true
}
}
+func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADD (MOVDconst [c]) x)
+ // cond:
+ // result: (ADDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADD x (MOVDconst [c]))
+ // cond:
+ // result: (ADDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVBstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVDstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVDstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVHstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVHstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVWstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -3456,8 +3456,8 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
v.AuxInt = SizeAndAlign(s).Align()
v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpPPC64ADDconst, ptr.Type)
- v0.AddArg(ptr)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(ptr)
v.AddArg(v0)
v.AddArg(mem)
return true
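The dispatch-switch hunks in rewritePPC64.go (OpPPC64ADD and the OpPPC64MOV*store cases moving below OpOr8, and their functions moving with them) follow from the rulegen change shown in the header: rules are now keyed by the full generated opcode name, so cases come out in plain sorted-string order. A hypothetical few lines showing why the arch-specific names interleave exactly where they do:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Keys as rulegen now builds them: "Op" + arch + opname.
	ops := []string{"OpPPC64ADD", "OpAdd16", "OpLoad", "OpOr8", "OpPPC64MOVBstore", "OpSignExt16to32"}
	sort.Strings(ops)
	fmt.Println(ops)
	// [OpAdd16 OpLoad OpOr8 OpPPC64ADD OpPPC64MOVBstore OpSignExt16to32]
}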
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
index d2fbfb9f10..33d90f5341 100644
--- a/src/cmd/compile/internal/ssa/rewritedec64.go
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -198,19 +198,19 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
// cond: is64BitInt(v.Type) && v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(is64BitInt(v.Type) && v.Type.IsSigned()) {
break
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32())
- v0.Aux = n
v0.AuxInt = off + 4
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
- v1.Aux = n
v1.AuxInt = off
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -218,19 +218,19 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
// cond: is64BitInt(v.Type) && !v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(is64BitInt(v.Type) && !v.Type.IsSigned()) {
break
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
- v0.Aux = n
v0.AuxInt = off + 4
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
- v1.Aux = n
v1.AuxInt = off
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -738,13 +738,13 @@ func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
// cond: c <= 32
// result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [32-c]))) (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [32-c]))))
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
break
}
hi := v_0.Args[0]
lo := v_0.Args[1]
- c := v.AuxInt
if !(c <= 32) {
break
}
@@ -783,22 +783,22 @@ func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
// cond: c > 32
// result: (Lrot64 (Int64Make lo hi) [c-32])
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
break
}
hi := v_0.Args[0]
lo := v_0.Args[1]
- c := v.AuxInt
if !(c > 32) {
break
}
v.reset(OpLrot64)
+ v.AuxInt = c - 32
v0 := b.NewValue0(v.Line, OpInt64Make, config.fe.TypeUInt64())
v0.AddArg(lo)
v0.AddArg(hi)
v.AddArg(v0)
- v.AuxInt = c - 32
return true
}
return false
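In rewritedec64.go the same normalization applies to captures: AuxInt is now read before Aux, mirroring the order in which the fields appear in the match pattern. For context, the Arg rules being touched split a 64-bit argument into 32-bit halves; here is a small standalone illustration of that Int64Make decomposition, assuming the little-endian word layout implied by [off+4] holding the high word:

package main

import "fmt"

func main() {
	var x uint64 = 0x1122334455667788
	hi := uint32(x >> 32) // the Arg at [off+4]
	lo := uint32(x)       // the Arg at [off]
	back := uint64(hi)<<32 | uint64(lo)
	fmt.Printf("%#x %#x %#x\n", hi, lo, back) // 0x11223344 0x55667788 0x1122334455667788
}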
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 00bb24a67b..f4f2b50f62 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -733,8 +733,8 @@ func rewriteValuegeneric_OpAddPtr(v *Value, config *Config) bool {
c := v_1.AuxInt
v.reset(OpOffPtr)
v.Type = t
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -1370,19 +1370,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsString()
// result: (StringMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsString()) {
break
}
v.reset(OpStringMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1390,23 +1390,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsSlice()
// result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsSlice()) {
break
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpArg, v.Type.ElemType().PtrTo())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v2.Aux = n
v2.AuxInt = off + 2*config.PtrSize
+ v2.Aux = n
v.AddArg(v2)
return true
}
@@ -1414,19 +1414,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsInterface()
// result: (IMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsInterface()) {
break
}
v.reset(OpIMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1434,19 +1434,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsComplex() && v.Type.Size() == 16
// result: (ComplexMake (Arg <config.fe.TypeFloat64()> {n} [off]) (Arg <config.fe.TypeFloat64()> {n} [off+8]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsComplex() && v.Type.Size() == 16) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
- v1.Aux = n
v1.AuxInt = off + 8
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1454,19 +1454,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsComplex() && v.Type.Size() == 8
// result: (ComplexMake (Arg <config.fe.TypeFloat32()> {n} [off]) (Arg <config.fe.TypeFloat32()> {n} [off+4]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsComplex() && v.Type.Size() == 8) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
- v1.Aux = n
v1.AuxInt = off + 4
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1486,15 +1486,15 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake1)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
return true
}
@@ -1503,19 +1503,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake2)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1524,23 +1524,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake3)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
- v2.Aux = n
v2.AuxInt = off + t.FieldOff(2)
+ v2.Aux = n
v.AddArg(v2)
return true
}
@@ -1549,27 +1549,27 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]) (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake4)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
- v2.Aux = n
v2.AuxInt = off + t.FieldOff(2)
+ v2.Aux = n
v.AddArg(v2)
v3 := b.NewValue0(v.Line, OpArg, t.FieldType(3))
- v3.Aux = n
v3.AuxInt = off + t.FieldOff(3)
+ v3.Aux = n
v.AddArg(v3)
return true
}
@@ -6359,26 +6359,26 @@ func rewriteValuegeneric_OpOffPtr(v *Value, config *Config) bool {
// cond:
// result: (OffPtr p [a+b])
for {
+ a := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpOffPtr {
break
}
- p := v_0.Args[0]
b := v_0.AuxInt
- a := v.AuxInt
+ p := v_0.Args[0]
v.reset(OpOffPtr)
- v.AddArg(p)
v.AuxInt = a + b
+ v.AddArg(p)
return true
}
// match: (OffPtr p [0])
// cond: v.Type.Compare(p.Type) == CMPeq
// result: p
for {
- p := v.Args[0]
if v.AuxInt != 0 {
break
}
+ p := v.Args[0]
if !(v.Type.Compare(p.Type) == CMPeq) {
break
}
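One last pattern worth noting in the rewritegeneric.go hunks above: bindings now follow match order, so a cheap AuxInt check (as in the OffPtr [0] rule) runs before any argument is captured. A toy sketch of that check-then-bind ordering, again with a stand-in Value type rather than the real ssa.Value:

package main

import "fmt"

type Value struct {
	AuxInt int64
	Args   []*Value
}

// offPtrZero models: (OffPtr p [0]) -> p (type check elided).
func offPtrZero(v *Value) (*Value, bool) {
	if v.AuxInt != 0 { // reject on AuxInt first...
		return nil, false
	}
	p := v.Args[0] // ...then bind the argument
	return p, true
}

func main() {
	p := &Value{}
	v := &Value{AuxInt: 0, Args: []*Value{p}}
	r, ok := offPtrZero(v)
	fmt.Println(ok, r == p) // true true
}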