author     Josh Bleecher Snyder <josharian@gmail.com>  2017-04-28 14:12:28 -0700
committer  Josh Bleecher Snyder <josharian@gmail.com>  2017-05-09 23:01:51 +0000
commit     46b88c9fbccad489aed53a77afa680792fff6eaa
tree       a1fcd0d8b8134f4be9eaf2a3bb16e8cc813d8724 /src/cmd
parent     6a24b2d0c1f5a8e2fa5ddb56db5a207671e8beb8
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc. To avoid
circular dependencies, we used an interface (ssa.Type) to represent
type information in SSA.

In the Go 1.9 cycle, gri extricated the Type type from package gc. As
a result, we can now use it in package ssa. Now, instead of package
types depending on package ssa, it is the other way. This is a more
sensible dependency tree, and helps compiler performance a bit.

Though this is a big CL, most of the changes are mechanical and
uninteresting.

Interesting bits:

* Add new singleton globals to package types for the special SSA
  types Memory, Void, Invalid, Flags, and Int128.
* Add two new Types, TSSA for the special types, and TTUPLE, for SSA
  tuple types. ssa.MakeTuple is now types.NewTuple.
* Move type comparison result constants CMPlt, CMPeq, and CMPgt to
  package types.
* We had picked the name "types" in our rules for the handy list of
  types provided by ssa.Config. That conflicted with the types
  package name, so change it to "typ".
* Update the type comparison routine to handle tuples and special
  types inline.
* Teach gc/fmt.go how to print special types.
* We can now eliminate ElemTypes in favor of just Elem, and probably
  also some other duplicated Type methods designed to return ssa.Type
  instead of *types.Type.
* The ssa tests were using their own dummy types, and they were not
  particularly careful about types in general. Of necessity, this CL
  switches them to use *types.Type; it does not make them more
  type-accurate. Unfortunately, using types.Type means initializing a
  bit of the types universe. This is prime for refactoring and
  improvement.

This shrinks ssa.Value; it now fits in a smaller size class on 64 bit
systems. This doesn't have a giant impact, though, since most Values
are preallocated in a chunk.

name        old alloc/op    new alloc/op    delta
Template        37.9MB ± 0%     37.7MB ± 0%  -0.57%  (p=0.000 n=10+8)
Unicode         28.9MB ± 0%     28.7MB ± 0%  -0.52%  (p=0.000 n=10+10)
GoTypes          110MB ± 0%      109MB ± 0%  -0.88%  (p=0.000 n=10+10)
Flate           24.7MB ± 0%     24.6MB ± 0%  -0.66%  (p=0.000 n=10+10)
GoParser        31.1MB ± 0%     30.9MB ± 0%  -0.61%  (p=0.000 n=10+9)
Reflect         73.9MB ± 0%     73.4MB ± 0%  -0.62%  (p=0.000 n=10+8)
Tar             25.8MB ± 0%     25.6MB ± 0%  -0.77%  (p=0.000 n=9+10)
XML             41.2MB ± 0%     40.9MB ± 0%  -0.80%  (p=0.000 n=10+10)
[Geo mean]      40.5MB          40.3MB       -0.68%

name        old allocs/op   new allocs/op   delta
Template          385k ± 0%       386k ± 0%    ~     (p=0.356 n=10+9)
Unicode           343k ± 1%       344k ± 0%    ~     (p=0.481 n=10+10)
GoTypes          1.16M ± 0%      1.16M ± 0%  -0.16%  (p=0.004 n=10+10)
Flate             238k ± 1%       238k ± 1%    ~     (p=0.853 n=10+10)
GoParser          320k ± 0%       320k ± 0%    ~     (p=0.720 n=10+9)
Reflect           957k ± 0%       957k ± 0%    ~     (p=0.460 n=10+8)
Tar               252k ± 0%       252k ± 0%    ~     (p=0.133 n=9+10)
XML               400k ± 0%       400k ± 0%    ~     (p=0.796 n=10+10)
[Geo mean]        428k            428k        -0.01%

Removing all the interface calls helps non-trivially with CPU, though.

name        old time/op     new time/op     delta
Template         178ms ± 4%      173ms ± 3%  -2.90%  (p=0.000 n=94+96)
Unicode         85.0ms ± 4%     83.9ms ± 4%  -1.23%  (p=0.000 n=96+96)
GoTypes          543ms ± 3%      528ms ± 3%  -2.73%  (p=0.000 n=98+96)
Flate            116ms ± 3%      113ms ± 4%  -2.34%  (p=0.000 n=96+99)
GoParser         144ms ± 3%      140ms ± 4%  -2.80%  (p=0.000 n=99+97)
Reflect          344ms ± 3%      334ms ± 4%  -3.02%  (p=0.000 n=100+99)
Tar              106ms ± 5%      103ms ± 4%  -3.30%  (p=0.000 n=98+94)
XML              198ms ± 5%      192ms ± 4%  -2.88%  (p=0.000 n=92+95)
[Geo mean]       178ms           173ms       -2.65%

name        old user-time/op  new user-time/op  delta
Template         229ms ± 5%      224ms ± 5%  -2.36%  (p=0.000 n=95+99)
Unicode          107ms ± 6%      106ms ± 5%  -1.13%  (p=0.001 n=93+95)
GoTypes          696ms ± 4%      679ms ± 4%  -2.45%  (p=0.000 n=97+99)
Flate            137ms ± 4%      134ms ± 5%  -2.66%  (p=0.000 n=99+96)
GoParser         176ms ± 5%      172ms ± 8%  -2.27%  (p=0.000 n=98+100)
Reflect          430ms ± 6%      411ms ± 5%  -4.46%  (p=0.000 n=100+92)
Tar              128ms ±13%      123ms ±13%  -4.21%  (p=0.000 n=100+100)
XML              239ms ± 6%      233ms ± 6%  -2.50%  (p=0.000 n=95+97)
[Geo mean]       220ms           213ms       -2.76%

Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
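To make the shape of the change concrete, here is a minimal,
self-contained Go sketch. It is not code from the CL: the names below
are local stand-ins, since cmd/compile/internal/ssa and
cmd/compile/internal/types are internal packages that cannot be
imported outside the toolchain. It mirrors the two patterns visible
throughout the diff: backend helpers such as loadByType now take the
concrete *types.Type instead of the old ssa.Type interface, and tuple
types come from types.NewTuple rather than ssa.MakeTuple.

    // Sketch only: stand-ins for cmd/compile/internal/{ssa,types}.
    package main

    import "fmt"

    // Before this CL, SSA described types through an interface:
    type ssaType interface {
    	Size() int64
    	IsFloat() bool
    }

    // After, one concrete type (stand-in for *types.Type) is shared,
    // with singletons for the special SSA-only types.
    type Type struct {
    	name  string
    	size  int64
    	float bool
    }

    func (t *Type) Size() int64   { return t.size }
    func (t *Type) IsFloat() bool { return t.float }

    // Stand-ins for the new singletons types.TypeMem, types.TypeVoid.
    var (
    	TypeMem  = &Type{name: "mem"}
    	TypeVoid = &Type{name: "void"}
    )

    // Stand-in for types.NewTuple (formerly ssa.MakeTuple): a tuple
    // type for ops that yield a value together with a memory state.
    func NewTuple(t0, t1 *Type) *Type {
    	return &Type{name: t0.name + "," + t1.name}
    }

    // loadByType mirrors the per-arch backend change: it takes the
    // concrete *Type, so no interface method calls are needed.
    func loadByType(t *Type) string {
    	if t.IsFloat() && t.Size() == 8 {
    		return "MOVSD"
    	}
    	return "MOVQ"
    }

    func main() {
    	u64 := &Type{name: "uint64", size: 8}
    	fmt.Println(loadByType(u64))             // MOVQ
    	fmt.Println(NewTuple(u64, TypeMem).name) // uint64,mem
    }

Replacing the interface with a concrete pointer is what moves
ssa.Value into a smaller size class and removes the dynamic dispatch
that the CPU numbers above attribute the speedup to.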
Diffstat (limited to 'src/cmd')
-rw-r--r--  src/cmd/compile/fmt_test.go                        3
-rw-r--r--  src/cmd/compile/internal/amd64/ssa.go              7
-rw-r--r--  src/cmd/compile/internal/arm/ssa.go                5
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go              5
-rw-r--r--  src/cmd/compile/internal/gc/fmt.go                 6
-rw-r--r--  src/cmd/compile/internal/gc/phi.go                 9
-rw-r--r--  src/cmd/compile/internal/gc/plive.go               2
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go                 234
-rw-r--r--  src/cmd/compile/internal/mips/ssa.go               5
-rw-r--r--  src/cmd/compile/internal/mips64/ssa.go             5
-rw-r--r--  src/cmd/compile/internal/ppc64/ssa.go              5
-rw-r--r--  src/cmd/compile/internal/s390x/ssa.go              7
-rw-r--r--  src/cmd/compile/internal/ssa/config.go             51
-rw-r--r--  src/cmd/compile/internal/ssa/copyelim_test.go      5
-rw-r--r--  src/cmd/compile/internal/ssa/cse.go                23
-rw-r--r--  src/cmd/compile/internal/ssa/cse_test.go           73
-rw-r--r--  src/cmd/compile/internal/ssa/deadcode_test.go      19
-rw-r--r--  src/cmd/compile/internal/ssa/deadstore.go          7
-rw-r--r--  src/cmd/compile/internal/ssa/deadstore_test.go     63
-rw-r--r--  src/cmd/compile/internal/ssa/decompose.go          26
-rw-r--r--  src/cmd/compile/internal/ssa/dom_test.go           63
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go        103
-rw-r--r--  src/cmd/compile/internal/ssa/func.go               81
-rw-r--r--  src/cmd/compile/internal/ssa/func_test.go          107
-rw-r--r--  src/cmd/compile/internal/ssa/fuse_test.go          37
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386.rules         14
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules       52
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM.rules         76
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules       24
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS.rules        138
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS64.rules      190
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64.rules       114
-rw-r--r--  src/cmd/compile/internal/ssa/gen/S390X.rules       18
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec.rules         70
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec64.rules       324
-rw-r--r--  src/cmd/compile/internal/ssa/gen/generic.rules     320
-rw-r--r--  src/cmd/compile/internal/ssa/gen/rulegen.go        22
-rw-r--r--  src/cmd/compile/internal/ssa/location.go           11
-rw-r--r--  src/cmd/compile/internal/ssa/loop_test.go          31
-rw-r--r--  src/cmd/compile/internal/ssa/loopreschedchecks.go  15
-rw-r--r--  src/cmd/compile/internal/ssa/nilcheck_test.go      111
-rw-r--r--  src/cmd/compile/internal/ssa/passbm_test.go        21
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc.go           7
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc_test.go      27
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite.go            19
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite386.go         356
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go       1414
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM.go         866
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go       1052
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS.go        1222
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS64.go      1678
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64.go       1342
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteS390X.go       2012
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec.go         120
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec64.go       976
-rw-r--r--  src/cmd/compile/internal/ssa/rewritegeneric.go     758
-rw-r--r--  src/cmd/compile/internal/ssa/schedule_test.go      39
-rw-r--r--  src/cmd/compile/internal/ssa/shift_test.go         65
-rw-r--r--  src/cmd/compile/internal/ssa/shortcircuit_test.go  23
-rw-r--r--  src/cmd/compile/internal/ssa/sizeof_test.go        2
-rw-r--r--  src/cmd/compile/internal/ssa/stackalloc.go         9
-rw-r--r--  src/cmd/compile/internal/ssa/type.go               201
-rw-r--r--  src/cmd/compile/internal/ssa/type_test.go          106
-rw-r--r--  src/cmd/compile/internal/ssa/value.go              3
-rw-r--r--  src/cmd/compile/internal/ssa/writebarrier.go       35
-rw-r--r--  src/cmd/compile/internal/ssa/writebarrier_test.go  29
-rw-r--r--  src/cmd/compile/internal/ssa/zcse.go               4
-rw-r--r--  src/cmd/compile/internal/types/type.go             182
-rw-r--r--  src/cmd/compile/internal/types/utils.go            2
-rw-r--r--  src/cmd/compile/internal/x86/387.go                3
-rw-r--r--  src/cmd/compile/internal/x86/ssa.go                7
71 files changed, 7489 insertions, 7572 deletions
diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go
index 6839265c85..59de326a91 100644
--- a/src/cmd/compile/fmt_test.go
+++ b/src/cmd/compile/fmt_test.go
@@ -590,6 +590,7 @@ var knownFormats = map[string]string{
"*cmd/compile/internal/types.Type %L": "",
"*cmd/compile/internal/types.Type %S": "",
"*cmd/compile/internal/types.Type %p": "",
+ "*cmd/compile/internal/types.Type %s": "",
"*cmd/compile/internal/types.Type %v": "",
"*cmd/internal/obj.Addr %v": "",
"*cmd/internal/obj.LSym %v": "",
@@ -633,8 +634,6 @@ var knownFormats = map[string]string{
"cmd/compile/internal/ssa.Location %v": "",
"cmd/compile/internal/ssa.Op %s": "",
"cmd/compile/internal/ssa.Op %v": "",
- "cmd/compile/internal/ssa.Type %s": "",
- "cmd/compile/internal/ssa.Type %v": "",
"cmd/compile/internal/ssa.ValAndOff %s": "",
"cmd/compile/internal/ssa.rbrank %d": "",
"cmd/compile/internal/ssa.regMask %d": "",
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 51010b4066..2d7727b270 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -10,6 +10,7 @@ import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
@@ -38,7 +39,7 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
+func loadByType(t *types.Type) obj.As {
// Avoid partial register write
if !t.IsFloat() && t.Size() <= 2 {
if t.Size() == 1 {
@@ -52,7 +53,7 @@ func loadByType(t ssa.Type) obj.As {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
+func storeByType(t *types.Type) obj.As {
width := t.Size()
if t.IsFloat() {
switch width {
@@ -77,7 +78,7 @@ func storeByType(t ssa.Type) obj.As {
}
// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) obj.As {
+func moveByType(t *types.Type) obj.As {
if t.IsFloat() {
// Moving the whole sse2 register is faster
// than moving just the correct low portion of it.
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index ea9c3a9cc1..343f2d3aec 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -10,12 +10,13 @@ import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
+func loadByType(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
@@ -45,7 +46,7 @@ func loadByType(t ssa.Type) obj.As {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
+func storeByType(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 7d79276e66..dec6a4e93e 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -9,12 +9,13 @@ import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
)
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
+func loadByType(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
@@ -50,7 +51,7 @@ func loadByType(t ssa.Type) obj.As {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
+func storeByType(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index c0d82d8c16..2f56d8ab51 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -1794,6 +1794,12 @@ func tconv(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
if t == nil {
return "<T>"
}
+ if t.Etype == types.TSSA {
+ return t.Extra.(string)
+ }
+ if t.Etype == types.TTUPLE {
+ return t.FieldType(0).String() + "," + t.FieldType(1).String()
+ }
if depth > 100 {
return "<...>"
diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go
index 013bb0164f..0ce7a4b11d 100644
--- a/src/cmd/compile/internal/gc/phi.go
+++ b/src/cmd/compile/internal/gc/phi.go
@@ -6,6 +6,7 @@ package gc
import (
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/src"
"container/heap"
"fmt"
@@ -71,7 +72,7 @@ func (s *phiState) insertPhis() {
// Generate a numbering for these variables.
s.varnum = map[*Node]int32{}
var vars []*Node
- var vartypes []ssa.Type
+ var vartypes []*types.Type
for _, b := range s.f.Blocks {
for _, v := range b.Values {
if v.Op != ssa.OpFwdRef {
@@ -162,7 +163,7 @@ levels:
s.queued = newSparseSet(s.f.NumBlocks())
s.hasPhi = newSparseSet(s.f.NumBlocks())
s.hasDef = newSparseSet(s.f.NumBlocks())
- s.placeholder = s.s.entryNewValue0(ssa.OpUnknown, ssa.TypeInvalid)
+ s.placeholder = s.s.entryNewValue0(ssa.OpUnknown, types.TypeInvalid)
// Generate phi ops for each variable.
for n := range vartypes {
@@ -182,7 +183,7 @@ levels:
}
}
-func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ ssa.Type) {
+func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *types.Type) {
priq := &s.priq
q := s.q
queued := s.queued
@@ -509,7 +510,7 @@ loop:
}
// lookupVarOutgoing finds the variable's value at the end of block b.
-func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t ssa.Type, var_ *Node, line src.XPos) *ssa.Value {
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *Node, line src.XPos) *ssa.Value {
for {
if v := s.defvars[b.ID][var_]; v != nil {
return v
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index 4811037311..ca449b72bd 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -930,7 +930,7 @@ func clobberPtr(b *ssa.Block, v *Node, offset int64) {
} else {
aux = &ssa.ArgSymbol{Node: v}
}
- b.NewValue0IA(src.NoXPos, ssa.OpClobber, ssa.TypeVoid, offset, aux)
+ b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, aux)
}
func (lv *Liveness) avarinitanyall(b *ssa.Block, any, all bvec) {
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index d058118f27..d12d45f009 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -151,7 +151,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.labels = map[string]*ssaLabel{}
s.labeledNodes = map[*Node]*ssaLabel{}
s.fwdVars = map[*Node]*ssa.Value{}
- s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
+ s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
@@ -372,127 +372,133 @@ func (s *state) peekPos() src.XPos {
}
// newValue0 adds a new value with no arguments to the current block.
-func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
+func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.curBlock.NewValue0(s.peekPos(), op, t)
}
// newValue0A adds a new value with no arguments and an aux value to the current block.
-func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
+func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}
// newValue0I adds a new value with no arguments and an auxint value to the current block.
-func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
+func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}
// newValue1 adds a new value with one argument to the current block.
-func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
+func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}
// newValue1A adds a new value with one argument and an aux value to the current block.
-func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
-func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
+func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}
// newValue2 adds a new value with two arguments to the current block.
-func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
-func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
+func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue3 adds a new value with three arguments to the current block.
-func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}
// newValue3I adds a new value with three arguments and an auxint value to the current block.
-func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3A adds a new value with three arguments and an aux value to the current block.
-func (s *state) newValue3A(op ssa.Op, t ssa.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue4 adds a new value with four arguments to the current block.
-func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}
// entryNewValue0 adds a new value with no arguments to the entry block.
-func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
+func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.f.Entry.NewValue0(s.peekPos(), op, t)
}
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
-func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
+func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}
// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
-func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
+func (s *state) entryNewValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
return s.f.Entry.NewValue0I(s.peekPos(), op, t, auxint)
}
// entryNewValue1 adds a new value with one argument to the entry block.
-func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
+func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
-func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
+func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
-func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// entryNewValue2 adds a new value with two arguments to the entry block.
-func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// const* routines add a new const value to the entry block.
-func (s *state) constSlice(t ssa.Type) *ssa.Value { return s.f.ConstSlice(s.peekPos(), t) }
-func (s *state) constInterface(t ssa.Type) *ssa.Value { return s.f.ConstInterface(s.peekPos(), t) }
-func (s *state) constNil(t ssa.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
-func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekPos(), t) }
+func (s *state) constSlice(t *types.Type) *ssa.Value {
+ return s.f.ConstSlice(s.peekPos(), t)
+}
+func (s *state) constInterface(t *types.Type) *ssa.Value {
+ return s.f.ConstInterface(s.peekPos(), t)
+}
+func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
+func (s *state) constEmptyString(t *types.Type) *ssa.Value {
+ return s.f.ConstEmptyString(s.peekPos(), t)
+}
func (s *state) constBool(c bool) *ssa.Value {
return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
-func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
+func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(s.peekPos(), t, c)
}
-func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
+func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
return s.f.ConstInt16(s.peekPos(), t, c)
}
-func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
+func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
return s.f.ConstInt32(s.peekPos(), t, c)
}
-func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
+func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
return s.f.ConstInt64(s.peekPos(), t, c)
}
-func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
+func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat32(s.peekPos(), t, c)
}
-func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
+func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat64(s.peekPos(), t, c)
}
-func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
+func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
if s.config.PtrSize == 8 {
return s.constInt64(t, c)
}
@@ -501,7 +507,7 @@ func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
}
return s.constInt32(t, int32(c))
}
-func (s *state) constOffPtrSP(t ssa.Type, c int64) *ssa.Value {
+func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}
@@ -920,7 +926,7 @@ func (s *state) stmt(n *Node) {
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
if !s.canSSA(n.Left) {
- s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, n.Left, s.mem())
}
case OVARLIVE:
@@ -928,7 +934,7 @@ func (s *state) stmt(n *Node) {
if !n.Left.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
}
- s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
case OCHECKNIL:
p := s.expr(n.Left)
@@ -955,8 +961,8 @@ func (s *state) exit() *ssa.Block {
for _, n := range s.returns {
addr := s.decladdrs[n]
val := s.variable(n, n.Type)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, n.Type, addr, val, s.mem())
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, n.Type, addr, val, s.mem())
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
@@ -2187,11 +2193,11 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
if inplace {
if sn.Op == ONAME {
// Tell liveness we're about to build a new slice
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], capaddr, r[2], s.mem())
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, pt, addr, r[0], s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capaddr, r[2], s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, pt, addr, r[0], s.mem())
// load the value we just stored to avoid having to spill it
s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
s.vars[&lenVar] = r[1] // avoid a spill in the fast path
@@ -2211,7 +2217,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenaddr, nl, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenaddr, nl, s.mem())
}
// Evaluate args
@@ -2242,7 +2248,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
if arg.store {
s.storeType(et, addr, arg.v, 0)
} else {
- store := s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
+ store := s.newValue3I(ssa.OpMove, types.TypeMem, et.Size(), addr, arg.v, s.mem())
store.Aux = et
s.vars[&memVar] = store
}
@@ -2393,7 +2399,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// Left is not ssa-able. Compute its address.
addr := s.addr(left, false)
if left.Op == ONAME && skip == 0 {
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem())
}
if isReflectHeaderDataField(left) {
// Package unsafe's documentation says storing pointers into
@@ -2407,9 +2413,9 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// Treat as a mem->mem move.
var store *ssa.Value
if right == nil {
- store = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
+ store = s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), addr, s.mem())
} else {
- store = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
+ store = s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), addr, right, s.mem())
}
store.Aux = t
s.vars[&memVar] = store
@@ -2470,7 +2476,7 @@ func (s *state) zeroVal(t *types.Type) *ssa.Value {
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
for i := 0; i < n; i++ {
- v.AddArg(s.zeroVal(t.FieldType(i).(*types.Type)))
+ v.AddArg(s.zeroVal(t.FieldType(i)))
}
return v
case t.IsArray():
@@ -2567,7 +2573,7 @@ func init() {
add("runtime", "KeepAlive",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
- s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem())
+ s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
@@ -2597,100 +2603,100 @@ func init() {
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(s.f.Config.Types.BytePtr, ssa.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Store",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Store64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(types.Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(types.Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(types.Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(types.Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
@@ -2931,12 +2937,12 @@ func init() {
/******** math/big ********/
add("math/big", "mulWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
},
sys.ArchAMD64)
add("math/big", "divWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
},
sys.ArchAMD64)
}
@@ -3111,7 +3117,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
argStart += int64(2 * Widthptr)
}
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem())
}
// Defer/go args
@@ -3120,9 +3126,9 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
argStart := Ctxt.FixedFrameSize()
argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINT32], addr, argsize, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINT32], addr, argsize, s.mem())
addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem())
stksize += 2 * int64(Widthptr)
}
@@ -3130,16 +3136,16 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
var call *ssa.Value
switch {
case k == callDefer:
- call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Deferproc, s.mem())
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Deferproc, s.mem())
case k == callGo:
- call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, Newproc, s.mem())
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem())
case closure != nil:
codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem())
- call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
+ call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
case codeptr != nil:
- call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
+ call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
case sym != nil:
- call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym.Linksym(), s.mem())
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
default:
Fatalf("bad call type %v %v", n.Op, n)
}
@@ -3402,7 +3408,7 @@ func (s *state) nilCheck(ptr *ssa.Value) {
if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
return
}
- s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
+ s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
@@ -3481,13 +3487,13 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
off = Rnd(off, t.Alignment())
ptr := s.constOffPtrSP(t.PtrTo(), off)
size := t.Size()
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, ptr, arg, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, ptr, arg, s.mem())
off += size
}
off = Rnd(off, int64(Widthreg))
// Issue call
- call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn, s.mem())
+ call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem())
s.vars[&memVar] = call
if !returns {
@@ -3522,7 +3528,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) {
if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
return
}
@@ -3541,7 +3547,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask)
func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
switch {
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
case t.IsPtrShaped():
// no scalar fields.
case t.IsString():
@@ -3550,29 +3556,29 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
}
len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
case t.IsSlice():
if skip&skipLen == 0 {
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
}
if skip&skipCap == 0 {
cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TINT], capAddr, cap, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capAddr, cap, s.mem())
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, types.Types[TUINTPTR], left, itab, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], left, itab, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
- s.storeTypeScalars(ft.(*types.Type), addr, val, 0)
+ s.storeTypeScalars(ft, addr, val, 0)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
@@ -3587,28 +3593,28 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, left, right, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsSlice():
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsInterface():
// itab field is treated as a scalar.
idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
- if !types.Haspointers(ft.(*types.Type)) {
+ if !types.Haspointers(ft) {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
- s.storeTypePtrs(ft.(*types.Type), addr, val)
+ s.storeTypePtrs(ft, addr, val)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
@@ -3722,7 +3728,7 @@ func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value)
type u642fcvtTab struct {
geq, cvt2F, and, rsh, or, add ssa.Op
- one func(*state, ssa.Type, int64) *ssa.Value
+ one func(*state, *types.Type, int64) *ssa.Value
}
var u64_f64 u642fcvtTab = u642fcvtTab{
@@ -3925,8 +3931,8 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
type f2uCvtTab struct {
ltf, cvt2U, subf, or ssa.Op
- floatValue func(*state, ssa.Type, float64) *ssa.Value
- intValue func(*state, ssa.Type, int64) *ssa.Value
+ floatValue func(*state, *types.Type, float64) *ssa.Value
+ intValue func(*state, *types.Type, int64) *ssa.Value
cutoff uint64
}
@@ -3956,7 +3962,7 @@ var f32_u32 f2uCvtTab = f2uCvtTab{
subf: ssa.OpSub32F,
or: ssa.OpOr32,
floatValue: (*state).constFloat32,
- intValue: func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+ intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 2147483648,
}
@@ -3966,7 +3972,7 @@ var f64_u32 f2uCvtTab = f2uCvtTab{
subf: ssa.OpSub64F,
or: ssa.OpOr32,
floatValue: (*state).constFloat64,
- intValue: func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+ intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 2147483648,
}
@@ -4148,7 +4154,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// TODO: get rid of some of these temporaries.
tmp = tempAt(n.Pos, s.curfn, n.Type)
addr = s.addr(tmp, false)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
}
cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
@@ -4199,7 +4205,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
}
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- store := s.newValue3I(ssa.OpMove, ssa.TypeMem, n.Type.Size(), addr, p, s.mem())
+ store := s.newValue3I(ssa.OpMove, types.TypeMem, n.Type.Size(), addr, p, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
}
@@ -4212,7 +4218,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
if tmp == nil {
s.vars[valVar] = s.zeroVal(n.Type)
} else {
- store := s.newValue2I(ssa.OpZero, ssa.TypeMem, n.Type.Size(), addr, s.mem())
+ store := s.newValue2I(ssa.OpZero, types.TypeMem, n.Type.Size(), addr, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
}
@@ -4227,7 +4233,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
delete(s.vars, valVar)
} else {
res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
- s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
}
resok = s.variable(&okVar, types.Types[TBOOL])
delete(s.vars, &okVar)
@@ -4235,7 +4241,7 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
}
// variable returns the value of a variable at the current location.
-func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
+func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
v := s.vars[name]
if v != nil {
return v
@@ -4258,7 +4264,7 @@ func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
}
func (s *state) mem() *ssa.Value {
- return s.variable(&memVar, ssa.TypeMem)
+ return s.variable(&memVar, types.TypeMem)
}
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
@@ -4826,8 +4832,8 @@ func (e *ssafn) StringData(s string) interface{} {
return aux
}
-func (e *ssafn) Auto(pos src.XPos, t ssa.Type) ssa.GCNode {
- n := tempAt(pos, e.curfn, t.(*types.Type)) // Note: adds new auto to e.curfn.Func.Dcl list
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
+ n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
return n
}
@@ -4864,7 +4870,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
- ptrType := types.NewPtr(name.Type.ElemType().(*types.Type))
+ ptrType := types.NewPtr(name.Type.ElemType())
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this slice up into three separate variables.
@@ -4953,8 +4959,8 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
// namedAuto returns a new AUTO variable with the given name and type.
// These are exposed to the debugger.
-func (e *ssafn) namedAuto(name string, typ ssa.Type, pos src.XPos) ssa.GCNode {
- t := typ.(*types.Type)
+func (e *ssafn) namedAuto(name string, typ *types.Type, pos src.XPos) ssa.GCNode {
+ t := typ
s := &types.Sym{Name: name, Pkg: localpkg}
n := new(Node)
@@ -4976,8 +4982,8 @@ func (e *ssafn) namedAuto(name string, typ ssa.Type, pos src.XPos) ssa.GCNode {
return n
}
-func (e *ssafn) CanSSA(t ssa.Type) bool {
- return canSSAType(t.(*types.Type))
+func (e *ssafn) CanSSA(t *types.Type) bool {
+ return canSSAType(t)
}
func (e *ssafn) Line(pos src.XPos) string {
@@ -5036,6 +5042,6 @@ func (e *ssafn) Syslook(name string) *obj.LSym {
return nil
}
-func (n *Node) Typ() ssa.Type {
+func (n *Node) Typ() *types.Type {
return n.Type
}
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
index 3673523af0..d2b4885eaa 100644
--- a/src/cmd/compile/internal/mips/ssa.go
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -9,6 +9,7 @@ import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
)
@@ -24,7 +25,7 @@ func isHILO(r int16) bool {
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type, r int16) obj.As {
+func loadByType(t *types.Type, r int16) obj.As {
if isFPreg(r) {
if t.Size() == 4 { // float32 or int32
return mips.AMOVF
@@ -53,7 +54,7 @@ func loadByType(t ssa.Type, r int16) obj.As {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type, r int16) obj.As {
+func storeByType(t *types.Type, r int16) obj.As {
if isFPreg(r) {
if t.Size() == 4 { // float32 or int32
return mips.AMOVF
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index 487d624ead..5a7a601942 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -9,6 +9,7 @@ import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
)
@@ -24,7 +25,7 @@ func isHILO(r int16) bool {
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type, r int16) obj.As {
+func loadByType(t *types.Type, r int16) obj.As {
if isFPreg(r) {
if t.Size() == 4 { // float32 or int32
return mips.AMOVF
@@ -59,7 +60,7 @@ func loadByType(t ssa.Type, r int16) obj.As {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type, r int16) obj.As {
+func storeByType(t *types.Type, r int16) obj.As {
if isFPreg(r) {
if t.Size() == 4 { // float32 or int32
return mips.AMOVF
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 4bd69a4723..5d902cdae1 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -7,6 +7,7 @@ package ppc64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
"math"
@@ -58,7 +59,7 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
+func loadByType(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
@@ -94,7 +95,7 @@ func loadByType(t ssa.Type) obj.As {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
+func storeByType(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index d755859dcf..8722345a09 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -9,6 +9,7 @@ import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
)
@@ -37,7 +38,7 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
+func loadByType(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
@@ -73,7 +74,7 @@ func loadByType(t ssa.Type) obj.As {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
+func storeByType(t *types.Type) obj.As {
width := t.Size()
if t.IsFloat() {
switch width {
@@ -98,7 +99,7 @@ func storeByType(t ssa.Type) obj.As {
}
// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) obj.As {
+func moveByType(t *types.Type) obj.As {
if t.IsFloat() {
return s390x.AFMOVD
} else {
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 07d5e49649..6587c40ebc 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
@@ -45,28 +46,28 @@ type (
)
type Types struct {
- Bool Type
- Int8 Type
- Int16 Type
- Int32 Type
- Int64 Type
- UInt8 Type
- UInt16 Type
- UInt32 Type
- UInt64 Type
- Int Type
- Float32 Type
- Float64 Type
- Uintptr Type
- String Type
- BytePtr Type // TODO: use unsafe.Pointer instead?
- Int32Ptr Type
- UInt32Ptr Type
- IntPtr Type
- UintptrPtr Type
- Float32Ptr Type
- Float64Ptr Type
- BytePtrPtr Type
+ Bool *types.Type
+ Int8 *types.Type
+ Int16 *types.Type
+ Int32 *types.Type
+ Int64 *types.Type
+ UInt8 *types.Type
+ UInt16 *types.Type
+ UInt32 *types.Type
+ UInt64 *types.Type
+ Int *types.Type
+ Float32 *types.Type
+ Float64 *types.Type
+ Uintptr *types.Type
+ String *types.Type
+ BytePtr *types.Type // TODO: use unsafe.Pointer instead?
+ Int32Ptr *types.Type
+ UInt32Ptr *types.Type
+ IntPtr *types.Type
+ UintptrPtr *types.Type
+ Float32Ptr *types.Type
+ Float64Ptr *types.Type
+ BytePtrPtr *types.Type
}
type Logger interface {
@@ -89,7 +90,7 @@ type Logger interface {
}
type Frontend interface {
- CanSSA(t Type) bool
+ CanSSA(t *types.Type) bool
Logger
@@ -98,7 +99,7 @@ type Frontend interface {
// Auto returns a Node for an auto variable of the given type.
// The SSA compiler uses this function to allocate space for spills.
- Auto(src.XPos, Type) GCNode
+ Auto(src.XPos, *types.Type) GCNode
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
@@ -133,7 +134,7 @@ type Frontend interface {
// interface used to hold *gc.Node. We'd use *gc.Node directly but
// that would lead to an import cycle.
type GCNode interface {
- Typ() Type
+ Typ() *types.Type
String() string
}
diff --git a/src/cmd/compile/internal/ssa/copyelim_test.go b/src/cmd/compile/internal/ssa/copyelim_test.go
index 5de147297a..fe31b12191 100644
--- a/src/cmd/compile/internal/ssa/copyelim_test.go
+++ b/src/cmd/compile/internal/ssa/copyelim_test.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"fmt"
"testing"
)
@@ -20,11 +21,11 @@ func benchmarkCopyElim(b *testing.B, n int) {
c := testConfig(b)
values := make([]interface{}, 0, n+2)
- values = append(values, Valu("mem", OpInitMem, TypeMem, 0, nil))
+ values = append(values, Valu("mem", OpInitMem, types.TypeMem, 0, nil))
last := "mem"
for i := 0; i < n; i++ {
name := fmt.Sprintf("copy%d", i)
- values = append(values, Valu(name, OpCopy, TypeMem, 0, nil, last))
+ values = append(values, Valu(name, OpCopy, types.TypeMem, 0, nil, last))
last = name
}
values = append(values, Exit(last))
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
index 10f3b883b4..d852e22d5c 100644
--- a/src/cmd/compile/internal/ssa/cse.go
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"fmt"
"sort"
)
@@ -281,7 +282,7 @@ func partitionValues(a []*Value, auxIDs auxmap) []eqclass {
j := 1
for ; j < len(a); j++ {
w := a[j]
- if cmpVal(v, w, auxIDs) != CMPeq {
+ if cmpVal(v, w, auxIDs) != types.CMPeq {
break
}
}
@@ -293,16 +294,16 @@ func partitionValues(a []*Value, auxIDs auxmap) []eqclass {
return partition
}
-func lt2Cmp(isLt bool) Cmp {
+func lt2Cmp(isLt bool) types.Cmp {
if isLt {
- return CMPlt
+ return types.CMPlt
}
- return CMPgt
+ return types.CMPgt
}
type auxmap map[interface{}]int32
-func cmpVal(v, w *Value, auxIDs auxmap) Cmp {
+func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp {
// Try to order these comparisons by cost (cheaper first)
if v.Op != w.Op {
return lt2Cmp(v.Op < w.Op)
@@ -322,21 +323,21 @@ func cmpVal(v, w *Value, auxIDs auxmap) Cmp {
return lt2Cmp(v.ID < w.ID)
}
- if tc := v.Type.Compare(w.Type); tc != CMPeq {
+ if tc := v.Type.Compare(w.Type); tc != types.CMPeq {
return tc
}
if v.Aux != w.Aux {
if v.Aux == nil {
- return CMPlt
+ return types.CMPlt
}
if w.Aux == nil {
- return CMPgt
+ return types.CMPgt
}
return lt2Cmp(auxIDs[v.Aux] < auxIDs[w.Aux])
}
- return CMPeq
+ return types.CMPeq
}
// Sort values to make the initial partition.
@@ -350,8 +351,8 @@ func (sv sortvalues) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
func (sv sortvalues) Less(i, j int) bool {
v := sv.a[i]
w := sv.a[j]
- if cmp := cmpVal(v, w, sv.auxIDs); cmp != CMPeq {
- return cmp == CMPlt
+ if cmp := cmpVal(v, w, sv.auxIDs); cmp != types.CMPeq {
+ return cmp == types.CMPlt
}
// Sort by value ID last to keep the sort result deterministic.
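Aside: the comparators above chain three-valued results, so each field either decides the order (types.CMPlt/types.CMPgt) or defers to the next (types.CMPeq). A minimal sketch of the same pattern, reusing this file's lt2Cmp helper on a hypothetical two-field key:

	func cmpPairSketch(a, b [2]int32) types.Cmp {
		if a[0] != b[0] {
			return lt2Cmp(a[0] < b[0]) // first field decides
		}
		if a[1] != b[1] {
			return lt2Cmp(a[1] < b[1]) // tie broken by the second field
		}
		return types.CMPeq // fully equal; caller applies further tiebreaks
	}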
diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go
index dcb081332e..aab50eb7d4 100644
--- a/src/cmd/compile/internal/ssa/cse_test.go
+++ b/src/cmd/compile/internal/ssa/cse_test.go
@@ -4,7 +4,10 @@
package ssa
-import "testing"
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
type tstAux struct {
s string
@@ -21,24 +24,24 @@ func TestCSEAuxPartitionBug(t *testing.T) {
// them in an order that triggers the bug
fun := c.Fun("entry",
Bloc("entry",
- Valu("start", OpInitMem, TypeMem, 0, nil),
- Valu("sp", OpSP, TypeBytePtr, 0, nil),
- Valu("r7", OpAdd64, TypeInt64, 0, nil, "arg3", "arg1"),
- Valu("r1", OpAdd64, TypeInt64, 0, nil, "arg1", "arg2"),
- Valu("arg1", OpArg, TypeInt64, 0, arg1Aux),
- Valu("arg2", OpArg, TypeInt64, 0, arg2Aux),
- Valu("arg3", OpArg, TypeInt64, 0, arg3Aux),
- Valu("r9", OpAdd64, TypeInt64, 0, nil, "r7", "r8"),
- Valu("r4", OpAdd64, TypeInt64, 0, nil, "r1", "r2"),
- Valu("r8", OpAdd64, TypeInt64, 0, nil, "arg3", "arg2"),
- Valu("r2", OpAdd64, TypeInt64, 0, nil, "arg1", "arg2"),
- Valu("raddr", OpAddr, TypeInt64Ptr, 0, nil, "sp"),
- Valu("raddrdef", OpVarDef, TypeMem, 0, nil, "start"),
- Valu("r6", OpAdd64, TypeInt64, 0, nil, "r4", "r5"),
- Valu("r3", OpAdd64, TypeInt64, 0, nil, "arg1", "arg2"),
- Valu("r5", OpAdd64, TypeInt64, 0, nil, "r2", "r3"),
- Valu("r10", OpAdd64, TypeInt64, 0, nil, "r6", "r9"),
- Valu("rstore", OpStore, TypeMem, 0, TypeInt64, "raddr", "r10", "raddrdef"),
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.BytePtr, 0, nil),
+ Valu("r7", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg1"),
+ Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("arg1", OpArg, c.config.Types.Int64, 0, arg1Aux),
+ Valu("arg2", OpArg, c.config.Types.Int64, 0, arg2Aux),
+ Valu("arg3", OpArg, c.config.Types.Int64, 0, arg3Aux),
+ Valu("r9", OpAdd64, c.config.Types.Int64, 0, nil, "r7", "r8"),
+ Valu("r4", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"),
+ Valu("r8", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg2"),
+ Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("raddr", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp"),
+ Valu("raddrdef", OpVarDef, types.TypeMem, 0, nil, "start"),
+ Valu("r6", OpAdd64, c.config.Types.Int64, 0, nil, "r4", "r5"),
+ Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("r5", OpAdd64, c.config.Types.Int64, 0, nil, "r2", "r3"),
+ Valu("r10", OpAdd64, c.config.Types.Int64, 0, nil, "r6", "r9"),
+ Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r10", "raddrdef"),
Goto("exit")),
Bloc("exit",
Exit("rstore")))
@@ -89,22 +92,22 @@ func TestZCSE(t *testing.T) {
fun := c.Fun("entry",
Bloc("entry",
- Valu("start", OpInitMem, TypeMem, 0, nil),
- Valu("sp", OpSP, TypeBytePtr, 0, nil),
- Valu("sb1", OpSB, TypeBytePtr, 0, nil),
- Valu("sb2", OpSB, TypeBytePtr, 0, nil),
- Valu("addr1", OpAddr, TypeInt64Ptr, 0, nil, "sb1"),
- Valu("addr2", OpAddr, TypeInt64Ptr, 0, nil, "sb2"),
- Valu("a1ld", OpLoad, TypeInt64, 0, nil, "addr1", "start"),
- Valu("a2ld", OpLoad, TypeInt64, 0, nil, "addr2", "start"),
- Valu("c1", OpConst64, TypeInt64, 1, nil),
- Valu("r1", OpAdd64, TypeInt64, 0, nil, "a1ld", "c1"),
- Valu("c2", OpConst64, TypeInt64, 1, nil),
- Valu("r2", OpAdd64, TypeInt64, 0, nil, "a2ld", "c2"),
- Valu("r3", OpAdd64, TypeInt64, 0, nil, "r1", "r2"),
- Valu("raddr", OpAddr, TypeInt64Ptr, 0, nil, "sp"),
- Valu("raddrdef", OpVarDef, TypeMem, 0, nil, "start"),
- Valu("rstore", OpStore, TypeMem, 0, TypeInt64, "raddr", "r3", "raddrdef"),
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.BytePtr, 0, nil),
+ Valu("sb1", OpSB, c.config.Types.BytePtr, 0, nil),
+ Valu("sb2", OpSB, c.config.Types.BytePtr, 0, nil),
+ Valu("addr1", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb1"),
+ Valu("addr2", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb2"),
+ Valu("a1ld", OpLoad, c.config.Types.Int64, 0, nil, "addr1", "start"),
+ Valu("a2ld", OpLoad, c.config.Types.Int64, 0, nil, "addr2", "start"),
+ Valu("c1", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "a1ld", "c1"),
+ Valu("c2", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "a2ld", "c2"),
+ Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"),
+ Valu("raddr", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp"),
+ Valu("raddrdef", OpVarDef, types.TypeMem, 0, nil, "start"),
+ Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r3", "raddrdef"),
Goto("exit")),
Bloc("exit",
Exit("rstore")))
diff --git a/src/cmd/compile/internal/ssa/deadcode_test.go b/src/cmd/compile/internal/ssa/deadcode_test.go
index 0f93bbf3fa..5777b841ef 100644
--- a/src/cmd/compile/internal/ssa/deadcode_test.go
+++ b/src/cmd/compile/internal/ssa/deadcode_test.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"fmt"
"strconv"
"testing"
@@ -14,14 +15,14 @@ func TestDeadLoop(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("exit")),
Bloc("exit",
Exit("mem")),
// dead loop
Bloc("deadblock",
// dead value in dead block
- Valu("deadval", OpConstBool, TypeBool, 1, nil),
+ Valu("deadval", OpConstBool, c.config.Types.Bool, 1, nil),
If("deadval", "deadblock", "exit")))
CheckFunc(fun.f)
@@ -44,8 +45,8 @@ func TestDeadValue(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("deadval", OpConst64, TypeInt64, 37, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("deadval", OpConst64, c.config.Types.Int64, 37, nil),
Goto("exit")),
Bloc("exit",
Exit("mem")))
@@ -67,8 +68,8 @@ func TestNeverTaken(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("cond", OpConstBool, TypeBool, 0, nil),
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
If("cond", "then", "else")),
Bloc("then",
Goto("exit")),
@@ -102,8 +103,8 @@ func TestNestedDeadBlocks(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("cond", OpConstBool, TypeBool, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil),
If("cond", "b2", "b4")),
Bloc("b2",
If("cond", "b3", "b4")),
@@ -144,7 +145,7 @@ func BenchmarkDeadCode(b *testing.B) {
blocks := make([]bloc, 0, n+2)
blocks = append(blocks,
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("exit")))
blocks = append(blocks, Bloc("exit", Exit("mem")))
for i := 0; i < n; i++ {
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 54d319650f..de3c6aed74 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -4,7 +4,10 @@
package ssa
-import "cmd/internal/src"
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
// dse does dead-store elimination on the Function.
// Dead stores are those which are unconditionally followed by
@@ -88,7 +91,7 @@ func dse(f *Func) {
if v.Op == OpStore || v.Op == OpZero {
var sz int64
if v.Op == OpStore {
- sz = v.Aux.(Type).Size()
+ sz = v.Aux.(*types.Type).Size()
} else { // OpZero
sz = v.AuxInt
}
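Aside: OpStore keeps the stored value's type in the untyped Aux field, so the size read above goes through a plain type assertion. A hedged sketch of that access pattern, as if inside package ssa:

	// storeSizeSketch mirrors the dse logic above; it assumes
	// v.Op is OpStore or OpZero.
	func storeSizeSketch(v *Value) int64 {
		if v.Op == OpStore {
			return v.Aux.(*types.Type).Size() // Aux holds the stored type
		}
		return v.AuxInt // OpZero keeps its byte count in AuxInt
	}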
diff --git a/src/cmd/compile/internal/ssa/deadstore_test.go b/src/cmd/compile/internal/ssa/deadstore_test.go
index 3e38fe8efa..2326c6c413 100644
--- a/src/cmd/compile/internal/ssa/deadstore_test.go
+++ b/src/cmd/compile/internal/ssa/deadstore_test.go
@@ -4,25 +4,28 @@
package ssa
-import "testing"
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
func TestDeadStore(t *testing.T) {
c := testConfig(t)
- elemType := &TypeImpl{Size_: 1, Name: "testtype"}
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr", Elem_: elemType} // dummy for testing
+ ptrType := c.config.Types.BytePtr
+ t.Logf("PTRTYPE %v", ptrType)
fun := c.Fun("entry",
Bloc("entry",
- Valu("start", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
- Valu("v", OpConstBool, TypeBool, 1, nil),
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
Valu("addr2", OpAddr, ptrType, 0, nil, "sb"),
Valu("addr3", OpAddr, ptrType, 0, nil, "sb"),
- Valu("zero1", OpZero, TypeMem, 1, TypeBool, "addr3", "start"),
- Valu("store1", OpStore, TypeMem, 0, TypeBool, "addr1", "v", "zero1"),
- Valu("store2", OpStore, TypeMem, 0, TypeBool, "addr2", "v", "store1"),
- Valu("store3", OpStore, TypeMem, 0, TypeBool, "addr1", "v", "store2"),
- Valu("store4", OpStore, TypeMem, 0, TypeBool, "addr3", "v", "store3"),
+ Valu("zero1", OpZero, types.TypeMem, 1, c.config.Types.Bool, "addr3", "start"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "zero1"),
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"),
+ Valu("store3", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store2"),
+ Valu("store4", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr3", "v", "store3"),
Goto("exit")),
Bloc("exit",
Exit("store3")))
@@ -44,17 +47,17 @@ func TestDeadStore(t *testing.T) {
func TestDeadStorePhi(t *testing.T) {
// make sure we don't get into an infinite loop with phi values.
c := testConfig(t)
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("start", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
- Valu("v", OpConstBool, TypeBool, 1, nil),
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
Goto("loop")),
Bloc("loop",
- Valu("phi", OpPhi, TypeMem, 0, nil, "start", "store"),
- Valu("store", OpStore, TypeMem, 0, TypeBool, "addr", "v", "phi"),
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "store"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr", "v", "phi"),
If("v", "loop", "exit")),
Bloc("exit",
Exit("store")))
@@ -70,17 +73,17 @@ func TestDeadStoreTypes(t *testing.T) {
// types of the address fields are identical (where identicalness is
// decided by the CSE pass).
c := testConfig(t)
- t1 := &TypeImpl{Size_: 8, Ptr: true, Name: "t1"}
- t2 := &TypeImpl{Size_: 4, Ptr: true, Name: "t2"}
+ t1 := c.config.Types.UInt64.PtrTo()
+ t2 := c.config.Types.UInt32.PtrTo()
fun := c.Fun("entry",
Bloc("entry",
- Valu("start", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
- Valu("v", OpConstBool, TypeBool, 1, nil),
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
Valu("addr1", OpAddr, t1, 0, nil, "sb"),
Valu("addr2", OpAddr, t2, 0, nil, "sb"),
- Valu("store1", OpStore, TypeMem, 0, TypeBool, "addr1", "v", "start"),
- Valu("store2", OpStore, TypeMem, 0, TypeBool, "addr2", "v", "store1"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "start"),
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"),
Goto("exit")),
Bloc("exit",
Exit("store2")))
@@ -101,15 +104,15 @@ func TestDeadStoreUnsafe(t *testing.T) {
// covers the case of two different types, but unsafe pointer casting
// can get to a point where the size is changed but type unchanged.
c := testConfig(t)
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+ ptrType := c.config.Types.UInt64.PtrTo()
fun := c.Fun("entry",
Bloc("entry",
- Valu("start", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
- Valu("v", OpConstBool, TypeBool, 1, nil),
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
- Valu("store1", OpStore, TypeMem, 0, TypeInt64, "addr1", "v", "start"), // store 8 bytes
- Valu("store2", OpStore, TypeMem, 0, TypeBool, "addr1", "v", "store1"), // store 1 byte
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "addr1", "v", "start"), // store 8 bytes
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store1"), // store 1 byte
Goto("exit")),
Bloc("exit",
Exit("store2")))
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
index beb89e3314..2b3f16c30c 100644
--- a/src/cmd/compile/internal/ssa/decompose.go
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -4,6 +4,8 @@
package ssa
+import "cmd/compile/internal/types"
+
// decompose converts phi ops on compound builtin types into phi
// ops on simple types.
// (The remaining compound ops are decomposed with rewrite rules.)
@@ -26,7 +28,7 @@ func decomposeBuiltIn(f *Func) {
t := name.Type
switch {
case t.IsInteger() && t.Size() > f.Config.RegSize:
- var elemType Type
+ var elemType *types.Type
if t.IsSigned() {
elemType = f.Config.Types.Int32
} else {
@@ -42,7 +44,7 @@ func decomposeBuiltIn(f *Func) {
}
delete(f.NamedValues, name)
case t.IsComplex():
- var elemType Type
+ var elemType *types.Type
if t.Size() == 16 {
elemType = f.Config.Types.Float64
} else {
@@ -160,19 +162,19 @@ func decomposeSlicePhi(v *Value) {
}
func decomposeInt64Phi(v *Value) {
- types := &v.Block.Func.Config.Types
- var partType Type
+ cfgtypes := &v.Block.Func.Config.Types
+ var partType *types.Type
if v.Type.IsSigned() {
- partType = types.Int32
+ partType = cfgtypes.Int32
} else {
- partType = types.UInt32
+ partType = cfgtypes.UInt32
}
hi := v.Block.NewValue0(v.Pos, OpPhi, partType)
- lo := v.Block.NewValue0(v.Pos, OpPhi, types.UInt32)
+ lo := v.Block.NewValue0(v.Pos, OpPhi, cfgtypes.UInt32)
for _, a := range v.Args {
hi.AddArg(a.Block.NewValue1(v.Pos, OpInt64Hi, partType, a))
- lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, types.UInt32, a))
+ lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, cfgtypes.UInt32, a))
}
v.reset(OpInt64Make)
v.AddArg(hi)
@@ -180,13 +182,13 @@ func decomposeInt64Phi(v *Value) {
}
func decomposeComplexPhi(v *Value) {
- types := &v.Block.Func.Config.Types
- var partType Type
+ cfgtypes := &v.Block.Func.Config.Types
+ var partType *types.Type
switch z := v.Type.Size(); z {
case 8:
- partType = types.Float32
+ partType = cfgtypes.Float32
case 16:
- partType = types.Float64
+ partType = cfgtypes.Float64
default:
v.Fatalf("decomposeComplexPhi: bad complex size %d", z)
}
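Aside: renaming the local types to cfgtypes above is forced, not stylistic; once the file imports package types, a local of the same name would shadow it. A compact illustration:

	func shadowSketch(v *Value) {
		// types := &v.Block.Func.Config.Types // would shadow the package...
		// _ = types.TypeMem                   // ...making this fail to compile
		cfgtypes := &v.Block.Func.Config.Types // renamed local keeps both usable
		_ = cfgtypes.UInt32 // field of the Config's Types struct
		_ = types.TypeMem   // package-level singleton, still reachable
	}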
diff --git a/src/cmd/compile/internal/ssa/dom_test.go b/src/cmd/compile/internal/ssa/dom_test.go
index c199c46ef3..fa517183c2 100644
--- a/src/cmd/compile/internal/ssa/dom_test.go
+++ b/src/cmd/compile/internal/ssa/dom_test.go
@@ -4,7 +4,10 @@
package ssa
-import "testing"
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
func BenchmarkDominatorsLinear(b *testing.B) { benchmarkDominators(b, 10000, genLinear) }
func BenchmarkDominatorsFwdBack(b *testing.B) { benchmarkDominators(b, 10000, genFwdBack) }
@@ -20,7 +23,7 @@ func genLinear(size int) []bloc {
var blocs []bloc
blocs = append(blocs,
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto(blockn(0)),
),
)
@@ -43,8 +46,8 @@ func genFwdBack(size int) []bloc {
var blocs []bloc
blocs = append(blocs,
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
Goto(blockn(0)),
),
)
@@ -73,8 +76,8 @@ func genManyPred(size int) []bloc {
var blocs []bloc
blocs = append(blocs,
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
Goto(blockn(0)),
),
)
@@ -85,15 +88,15 @@ func genManyPred(size int) []bloc {
switch i % 3 {
case 0:
blocs = append(blocs, Bloc(blockn(i),
- Valu("a", OpConstBool, TypeBool, 1, nil),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
Goto(blockn(i+1))))
case 1:
blocs = append(blocs, Bloc(blockn(i),
- Valu("a", OpConstBool, TypeBool, 1, nil),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
If("p", blockn(i+1), blockn(0))))
case 2:
blocs = append(blocs, Bloc(blockn(i),
- Valu("a", OpConstBool, TypeBool, 1, nil),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
If("p", blockn(i+1), blockn(size))))
}
}
@@ -111,8 +114,8 @@ func genMaxPred(size int) []bloc {
var blocs []bloc
blocs = append(blocs,
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
Goto(blockn(0)),
),
)
@@ -136,15 +139,15 @@ func genMaxPredValue(size int) []bloc {
var blocs []bloc
blocs = append(blocs,
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
Goto(blockn(0)),
),
)
for i := 0; i < size; i++ {
blocs = append(blocs, Bloc(blockn(i),
- Valu("a", OpConstBool, TypeBool, 1, nil),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
If("p", blockn(i+1), "exit")))
}
@@ -223,7 +226,7 @@ func TestDominatorsSingleBlock(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Exit("mem")))
doms := map[string]string{}
@@ -238,7 +241,7 @@ func TestDominatorsSimple(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("a")),
Bloc("a",
Goto("b")),
@@ -266,8 +269,8 @@ func TestDominatorsMultPredFwd(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
If("p", "a", "c")),
Bloc("a",
If("p", "b", "c")),
@@ -294,8 +297,8 @@ func TestDominatorsDeadCode(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 0, nil),
If("p", "b3", "b5")),
Bloc("b2", Exit("mem")),
Bloc("b3", Goto("b2")),
@@ -319,8 +322,8 @@ func TestDominatorsMultPredRev(t *testing.T) {
Bloc("entry",
Goto("first")),
Bloc("first",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
Goto("a")),
Bloc("a",
If("p", "b", "first")),
@@ -348,8 +351,8 @@ func TestDominatorsMultPred(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
If("p", "a", "c")),
Bloc("a",
If("p", "b", "c")),
@@ -377,8 +380,8 @@ func TestInfiniteLoop(t *testing.T) {
// note lack of an exit block
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
Goto("a")),
Bloc("a",
Goto("b")),
@@ -414,8 +417,8 @@ func TestDomTricky(t *testing.T) {
cfg := testConfig(t)
fun := cfg.Fun("1",
Bloc("1",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
Goto("4")),
Bloc("2",
Goto("11")),
@@ -490,8 +493,8 @@ func testDominatorsPostTricky(t *testing.T, b7then, b7else, b12then, b12else, b1
c := testConfig(t)
fun := c.Fun("b1",
Bloc("b1",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("p", OpConstBool, TypeBool, 1, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
If("p", "b3", "b2")),
Bloc("b3",
If("p", "b5", "b6")),
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 228a33697e..3bb67a951b 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -5,10 +5,12 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
"cmd/internal/obj/x86"
"cmd/internal/src"
+ "fmt"
"testing"
)
@@ -61,11 +63,11 @@ type DummyFrontend struct {
}
type DummyAuto struct {
- t Type
+ t *types.Type
s string
}
-func (d *DummyAuto) Typ() Type {
+func (d *DummyAuto) Typ() *types.Type {
return d.t
}
@@ -76,7 +78,7 @@ func (d *DummyAuto) String() string {
func (DummyFrontend) StringData(s string) interface{} {
return nil
}
-func (DummyFrontend) Auto(pos src.XPos, t Type) GCNode {
+func (DummyFrontend) Auto(pos src.XPos, t *types.Type) GCNode {
return &DummyAuto{t: t, s: "aDummyAuto"}
}
func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
@@ -128,34 +130,81 @@ func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t
func (d DummyFrontend) Debug_checknil() bool { return false }
func (d DummyFrontend) Debug_wb() bool { return false }
-var dummyTypes = Types{
- Bool: TypeBool,
- Int8: TypeInt8,
- Int16: TypeInt16,
- Int32: TypeInt32,
- Int64: TypeInt64,
- UInt8: TypeUInt8,
- UInt16: TypeUInt16,
- UInt32: TypeUInt32,
- UInt64: TypeUInt64,
- Float32: TypeFloat32,
- Float64: TypeFloat64,
- Int: TypeInt64,
- Uintptr: TypeUInt64,
- String: nil,
- BytePtr: TypeBytePtr,
- Int32Ptr: TypeInt32.PtrTo(),
- UInt32Ptr: TypeUInt32.PtrTo(),
- IntPtr: TypeInt64.PtrTo(),
- UintptrPtr: TypeUInt64.PtrTo(),
- Float32Ptr: TypeFloat32.PtrTo(),
- Float64Ptr: TypeFloat64.PtrTo(),
- BytePtrPtr: TypeBytePtr.PtrTo(),
+var dummyTypes Types
+
+func init() {
+ // Initialize just enough of the universe and the types package to make our tests function.
+ // TODO(josharian): move universe initialization to the types package,
+ // so this test setup can share it.
+
+ types.Tconv = func(t *types.Type, flag, mode, depth int) string {
+ return t.Etype.String()
+ }
+ types.Sconv = func(s *types.Sym, flag, mode int) string {
+ return "sym"
+ }
+ types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
+ fmt.Fprintf(s, "sym")
+ }
+ types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
+ fmt.Fprintf(s, "%v", t.Etype)
+ }
+ types.Dowidth = func(t *types.Type) {}
+
+ types.Tptr = types.TPTR64
+ for _, typ := range [...]struct {
+ width int64
+ et types.EType
+ }{
+ {1, types.TINT8},
+ {1, types.TUINT8},
+ {1, types.TBOOL},
+ {2, types.TINT16},
+ {2, types.TUINT16},
+ {4, types.TINT32},
+ {4, types.TUINT32},
+ {4, types.TFLOAT32},
+ {8, types.TFLOAT64},
+ {8, types.TUINT64},
+ {8, types.TINT64},
+ {8, types.TINT},
+ {8, types.TUINTPTR},
+ } {
+ t := types.New(typ.et)
+ t.Width = typ.width
+ t.Align = uint8(typ.width)
+ types.Types[typ.et] = t
+ }
+
+ dummyTypes = Types{
+ Bool: types.Types[types.TBOOL],
+ Int8: types.Types[types.TINT8],
+ Int16: types.Types[types.TINT16],
+ Int32: types.Types[types.TINT32],
+ Int64: types.Types[types.TINT64],
+ UInt8: types.Types[types.TUINT8],
+ UInt16: types.Types[types.TUINT16],
+ UInt32: types.Types[types.TUINT32],
+ UInt64: types.Types[types.TUINT64],
+ Float32: types.Types[types.TFLOAT32],
+ Float64: types.Types[types.TFLOAT64],
+ Int: types.Types[types.TINT],
+ Uintptr: types.Types[types.TUINTPTR],
+ String: types.Types[types.TSTRING],
+ BytePtr: types.NewPtr(types.Types[types.TUINT8]),
+ Int32Ptr: types.NewPtr(types.Types[types.TINT32]),
+ UInt32Ptr: types.NewPtr(types.Types[types.TUINT32]),
+ IntPtr: types.NewPtr(types.Types[types.TINT]),
+ UintptrPtr: types.NewPtr(types.Types[types.TUINTPTR]),
+ Float32Ptr: types.NewPtr(types.Types[types.TFLOAT32]),
+ Float64Ptr: types.NewPtr(types.Types[types.TFLOAT64]),
+ BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[types.TUINT8])),
+ }
}
func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
-func (d DummyFrontend) CanSSA(t Type) bool {
+func (d DummyFrontend) CanSSA(t *types.Type) bool {
// There are no un-SSAable types in dummy land.
return true
}
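Aside: once init has populated types.Types and the formatting hooks, tests can mint and combine types the way the compiler proper does. A hypothetical usage sketch:

	func universeSketch() int64 {
		i32 := types.Types[types.TINT32] // 4-byte signed int from the table above
		pi32 := types.NewPtr(i32)        // pointer type; width follows types.Tptr
		_ = pi32
		return i32.Size() // 4, per the Width set in init
	}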
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 64f67b4633..7ec596372a 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"crypto/sha1"
"fmt"
@@ -23,16 +24,16 @@ type writeSyncer interface {
// This package compiles each Func independently.
// Funcs are single-use; a new Func must be created for every compiled function.
type Func struct {
- Config *Config // architecture information
- Cache *Cache // re-usable cache
- fe Frontend // frontend state associated with this Func, callbacks into compiler frontend
- pass *pass // current pass information (name, options, etc.)
- Name string // e.g. bytes·Compare
- Type Type // type signature of the function.
- Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
- Entry *Block // the entry basic block
- bid idAlloc // block ID allocator
- vid idAlloc // value ID allocator
+ Config *Config // architecture information
+ Cache *Cache // re-usable cache
+ fe Frontend // frontend state associated with this Func, callbacks into compiler frontend
+ pass *pass // current pass information (name, options, etc.)
+ Name string // e.g. bytes·Compare
+ Type *types.Type // type signature of the function.
+ Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
+ Entry *Block // the entry basic block
+ bid idAlloc // block ID allocator
+ vid idAlloc // value ID allocator
// Given an environment variable used for debug hash match,
// what file (if any) receives the yes/no logging?
@@ -107,7 +108,7 @@ func (f *Func) retSparseSet(ss *sparseSet) {
}
// newValue allocates a new Value with the given fields and places it at the end of b.Values.
-func (f *Func) newValue(op Op, t Type, b *Block, pos src.XPos) *Value {
+func (f *Func) newValue(op Op, t *types.Type, b *Block, pos src.XPos) *Value {
var v *Value
if f.freeValues != nil {
v = f.freeValues
@@ -134,7 +135,7 @@ func (f *Func) newValue(op Op, t Type, b *Block, pos src.XPos) *Value {
// The returned value is not placed in any block. Once the caller
// decides on a block b, it must set the returned value's Block field
// and append the value to b.Values.
-func (f *Func) newValueNoBlock(op Op, t Type, pos src.XPos) *Value {
+func (f *Func) newValueNoBlock(op Op, t *types.Type, pos src.XPos) *Value {
var v *Value
if f.freeValues != nil {
v = f.freeValues
@@ -243,7 +244,7 @@ func (f *Func) freeBlock(b *Block) {
}
// NewValue0 returns a new value in the block with no arguments and zero aux values.
-func (b *Block) NewValue0(pos src.XPos, op Op, t Type) *Value {
+func (b *Block) NewValue0(pos src.XPos, op Op, t *types.Type) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Args = v.argstorage[:0]
@@ -251,7 +252,7 @@ func (b *Block) NewValue0(pos src.XPos, op Op, t Type) *Value {
}
// NewValue0I returns a new value in the block with no arguments and an auxint value.
-func (b *Block) NewValue0I(pos src.XPos, op Op, t Type, auxint int64) *Value {
+func (b *Block) NewValue0I(pos src.XPos, op Op, t *types.Type, auxint int64) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Args = v.argstorage[:0]
@@ -259,7 +260,7 @@ func (b *Block) NewValue0I(pos src.XPos, op Op, t Type, auxint int64) *Value {
}
// NewValue0A returns a new value in the block with no arguments and an aux value.
-func (b *Block) NewValue0A(pos src.XPos, op Op, t Type, aux interface{}) *Value {
+func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux interface{}) *Value {
if _, ok := aux.(int64); ok {
// Disallow int64 aux values. They should be in the auxint field instead.
// Maybe we want to allow this at some point, but for now we disallow it
@@ -274,7 +275,7 @@ func (b *Block) NewValue0A(pos src.XPos, op Op, t Type, aux interface{}) *Value
}
// NewValue0IA returns a new value in the block with no arguments and both an auxint and aux values.
-func (b *Block) NewValue0IA(pos src.XPos, op Op, t Type, auxint int64, aux interface{}) *Value {
+func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Aux = aux
@@ -283,7 +284,7 @@ func (b *Block) NewValue0IA(pos src.XPos, op Op, t Type, auxint int64, aux inter
}
// NewValue1 returns a new value in the block with one argument and zero aux values.
-func (b *Block) NewValue1(pos src.XPos, op Op, t Type, arg *Value) *Value {
+func (b *Block) NewValue1(pos src.XPos, op Op, t *types.Type, arg *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Args = v.argstorage[:1]
@@ -293,7 +294,7 @@ func (b *Block) NewValue1(pos src.XPos, op Op, t Type, arg *Value) *Value {
}
// NewValue1I returns a new value in the block with one argument and an auxint value.
-func (b *Block) NewValue1I(pos src.XPos, op Op, t Type, auxint int64, arg *Value) *Value {
+func (b *Block) NewValue1I(pos src.XPos, op Op, t *types.Type, auxint int64, arg *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Args = v.argstorage[:1]
@@ -303,7 +304,7 @@ func (b *Block) NewValue1I(pos src.XPos, op Op, t Type, auxint int64, arg *Value
}
// NewValue1A returns a new value in the block with one argument and an aux value.
-func (b *Block) NewValue1A(pos src.XPos, op Op, t Type, aux interface{}, arg *Value) *Value {
+func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Aux = aux
@@ -314,7 +315,7 @@ func (b *Block) NewValue1A(pos src.XPos, op Op, t Type, aux interface{}, arg *Va
}
// NewValue1IA returns a new value in the block with one argument and both an auxint and aux values.
-func (b *Block) NewValue1IA(pos src.XPos, op Op, t Type, auxint int64, aux interface{}, arg *Value) *Value {
+func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Aux = aux
@@ -325,7 +326,7 @@ func (b *Block) NewValue1IA(pos src.XPos, op Op, t Type, auxint int64, aux inter
}
// NewValue2 returns a new value in the block with two arguments and zero aux values.
-func (b *Block) NewValue2(pos src.XPos, op Op, t Type, arg0, arg1 *Value) *Value {
+func (b *Block) NewValue2(pos src.XPos, op Op, t *types.Type, arg0, arg1 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Args = v.argstorage[:2]
@@ -337,7 +338,7 @@ func (b *Block) NewValue2(pos src.XPos, op Op, t Type, arg0, arg1 *Value) *Value
}
// NewValue2I returns a new value in the block with two arguments and an auxint value.
-func (b *Block) NewValue2I(pos src.XPos, op Op, t Type, auxint int64, arg0, arg1 *Value) *Value {
+func (b *Block) NewValue2I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Args = v.argstorage[:2]
@@ -349,7 +350,7 @@ func (b *Block) NewValue2I(pos src.XPos, op Op, t Type, auxint int64, arg0, arg1
}
// NewValue3 returns a new value in the block with three arguments and zero aux values.
-func (b *Block) NewValue3(pos src.XPos, op Op, t Type, arg0, arg1, arg2 *Value) *Value {
+func (b *Block) NewValue3(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Args = v.argstorage[:3]
@@ -363,7 +364,7 @@ func (b *Block) NewValue3(pos src.XPos, op Op, t Type, arg0, arg1, arg2 *Value)
}
// NewValue3I returns a new value in the block with three arguments and an auxint value.
-func (b *Block) NewValue3I(pos src.XPos, op Op, t Type, auxint int64, arg0, arg1, arg2 *Value) *Value {
+func (b *Block) NewValue3I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = auxint
v.Args = v.argstorage[:3]
@@ -377,7 +378,7 @@ func (b *Block) NewValue3I(pos src.XPos, op Op, t Type, auxint int64, arg0, arg1
}
// NewValue3A returns a new value in the block with three arguments and an aux value.
-func (b *Block) NewValue3A(pos src.XPos, op Op, t Type, aux interface{}, arg0, arg1, arg2 *Value) *Value {
+func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Aux = aux
@@ -392,7 +393,7 @@ func (b *Block) NewValue3A(pos src.XPos, op Op, t Type, aux interface{}, arg0, a
}
// NewValue4 returns a new value in the block with four arguments and zero aux values.
-func (b *Block) NewValue4(pos src.XPos, op Op, t Type, arg0, arg1, arg2, arg3 *Value) *Value {
+func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, arg3 *Value) *Value {
v := b.Func.newValue(op, t, b, pos)
v.AuxInt = 0
v.Args = []*Value{arg0, arg1, arg2, arg3}
@@ -404,13 +405,13 @@ func (b *Block) NewValue4(pos src.XPos, op Op, t Type, arg0, arg1, arg2, arg3 *V
}
// constVal returns a constant value for c.
-func (f *Func) constVal(pos src.XPos, op Op, t Type, c int64, setAuxInt bool) *Value {
+func (f *Func) constVal(pos src.XPos, op Op, t *types.Type, c int64, setAuxInt bool) *Value {
if f.constants == nil {
f.constants = make(map[int64][]*Value)
}
vv := f.constants[c]
for _, v := range vv {
- if v.Op == op && v.Type.Compare(t) == CMPeq {
+ if v.Op == op && v.Type.Compare(t) == types.CMPeq {
if setAuxInt && v.AuxInt != c {
panic(fmt.Sprintf("cached const %s should have AuxInt of %d", v.LongString(), c))
}
@@ -439,47 +440,47 @@ const (
)
// ConstBool returns a bool constant representing its argument.
-func (f *Func) ConstBool(pos src.XPos, t Type, c bool) *Value {
+func (f *Func) ConstBool(pos src.XPos, t *types.Type, c bool) *Value {
i := int64(0)
if c {
i = 1
}
return f.constVal(pos, OpConstBool, t, i, true)
}
-func (f *Func) ConstInt8(pos src.XPos, t Type, c int8) *Value {
+func (f *Func) ConstInt8(pos src.XPos, t *types.Type, c int8) *Value {
return f.constVal(pos, OpConst8, t, int64(c), true)
}
-func (f *Func) ConstInt16(pos src.XPos, t Type, c int16) *Value {
+func (f *Func) ConstInt16(pos src.XPos, t *types.Type, c int16) *Value {
return f.constVal(pos, OpConst16, t, int64(c), true)
}
-func (f *Func) ConstInt32(pos src.XPos, t Type, c int32) *Value {
+func (f *Func) ConstInt32(pos src.XPos, t *types.Type, c int32) *Value {
return f.constVal(pos, OpConst32, t, int64(c), true)
}
-func (f *Func) ConstInt64(pos src.XPos, t Type, c int64) *Value {
+func (f *Func) ConstInt64(pos src.XPos, t *types.Type, c int64) *Value {
return f.constVal(pos, OpConst64, t, c, true)
}
-func (f *Func) ConstFloat32(pos src.XPos, t Type, c float64) *Value {
+func (f *Func) ConstFloat32(pos src.XPos, t *types.Type, c float64) *Value {
return f.constVal(pos, OpConst32F, t, int64(math.Float64bits(float64(float32(c)))), true)
}
-func (f *Func) ConstFloat64(pos src.XPos, t Type, c float64) *Value {
+func (f *Func) ConstFloat64(pos src.XPos, t *types.Type, c float64) *Value {
return f.constVal(pos, OpConst64F, t, int64(math.Float64bits(c)), true)
}
-func (f *Func) ConstSlice(pos src.XPos, t Type) *Value {
+func (f *Func) ConstSlice(pos src.XPos, t *types.Type) *Value {
return f.constVal(pos, OpConstSlice, t, constSliceMagic, false)
}
-func (f *Func) ConstInterface(pos src.XPos, t Type) *Value {
+func (f *Func) ConstInterface(pos src.XPos, t *types.Type) *Value {
return f.constVal(pos, OpConstInterface, t, constInterfaceMagic, false)
}
-func (f *Func) ConstNil(pos src.XPos, t Type) *Value {
+func (f *Func) ConstNil(pos src.XPos, t *types.Type) *Value {
return f.constVal(pos, OpConstNil, t, constNilMagic, false)
}
-func (f *Func) ConstEmptyString(pos src.XPos, t Type) *Value {
+func (f *Func) ConstEmptyString(pos src.XPos, t *types.Type) *Value {
v := f.constVal(pos, OpConstString, t, constEmptyStringMagic, false)
v.Aux = ""
return v
}
-func (f *Func) ConstOffPtrSP(pos src.XPos, t Type, c int64, sp *Value) *Value {
+func (f *Func) ConstOffPtrSP(pos src.XPos, t *types.Type, c int64, sp *Value) *Value {
v := f.constVal(pos, OpOffPtr, t, c, true)
if len(v.Args) == 0 {
v.AddArg(sp)
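Aside: constVal keys its cache on the constant's value and reuses an entry only when Compare reports types.CMPeq, so each (op, type, value) triple is materialized once per Func. A hedged usage sketch, as if inside package ssa:

	// constReuseSketch returns true: the second call hits f.constants.
	func constReuseSketch(f *Func, t *types.Type) bool {
		v1 := f.ConstInt64(src.NoXPos, t, 42)
		v2 := f.ConstInt64(src.NoXPos, t, 42)
		return v1 == v2 // same *Value both times
	}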
diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
index 3c81f0876e..94ff27e9f5 100644
--- a/src/cmd/compile/internal/ssa/func_test.go
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -18,12 +18,12 @@
//
// fun := Fun("entry",
// Bloc("entry",
-// Valu("mem", OpInitMem, TypeMem, 0, nil),
+// Valu("mem", OpInitMem, types.TypeMem, 0, nil),
// Goto("exit")),
// Bloc("exit",
// Exit("mem")),
// Bloc("deadblock",
-// Valu("deadval", OpConstBool, TypeBool, 0, true),
+// Valu("deadval", OpConstBool, c.config.Types.Bool, 0, true),
// If("deadval", "deadblock", "exit")))
//
// and the Blocks or Values used in the Func can be accessed
@@ -37,6 +37,7 @@ package ssa
// the parser can be used instead of Fun.
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"reflect"
@@ -223,7 +224,7 @@ func Bloc(name string, entries ...interface{}) bloc {
}
// Valu defines a value in a block.
-func Valu(name string, op Op, t Type, auxint int64, aux interface{}, args ...string) valu {
+func Valu(name string, op Op, t *types.Type, auxint int64, aux interface{}, args ...string) valu {
return valu{name, op, t, auxint, aux, args}
}
@@ -266,7 +267,7 @@ type ctrl struct {
type valu struct {
name string
op Op
- t Type
+ t *types.Type
auxint int64
aux interface{}
args []string
@@ -276,10 +277,10 @@ func TestArgs(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("a", OpConst64, TypeInt64, 14, nil),
- Valu("b", OpConst64, TypeInt64, 26, nil),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("a", OpConst64, c.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, c.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("exit")),
Bloc("exit",
Exit("mem")))
@@ -299,19 +300,19 @@ func TestEquiv(t *testing.T) {
{
cfg.Fun("entry",
Bloc("entry",
- Valu("a", OpConst64, TypeInt64, 14, nil),
- Valu("b", OpConst64, TypeInt64, 26, nil),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("exit")),
Bloc("exit",
Exit("mem"))),
cfg.Fun("entry",
Bloc("entry",
- Valu("a", OpConst64, TypeInt64, 14, nil),
- Valu("b", OpConst64, TypeInt64, 26, nil),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("exit")),
Bloc("exit",
Exit("mem"))),
@@ -320,10 +321,10 @@ func TestEquiv(t *testing.T) {
{
cfg.Fun("entry",
Bloc("entry",
- Valu("a", OpConst64, TypeInt64, 14, nil),
- Valu("b", OpConst64, TypeInt64, 26, nil),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("exit")),
Bloc("exit",
Exit("mem"))),
@@ -331,10 +332,10 @@ func TestEquiv(t *testing.T) {
Bloc("exit",
Exit("mem")),
Bloc("entry",
- Valu("a", OpConst64, TypeInt64, 14, nil),
- Valu("b", OpConst64, TypeInt64, 26, nil),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("exit"))),
},
}
@@ -351,71 +352,71 @@ func TestEquiv(t *testing.T) {
{
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Goto("exit")),
Bloc("exit",
Exit("mem"))),
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Exit("mem"))),
},
// value order changed
{
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("b", OpConst64, TypeInt64, 26, nil),
- Valu("a", OpConst64, TypeInt64, 14, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
Exit("mem"))),
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("a", OpConst64, TypeInt64, 14, nil),
- Valu("b", OpConst64, TypeInt64, 26, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
Exit("mem"))),
},
// value auxint different
{
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("a", OpConst64, TypeInt64, 14, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
Exit("mem"))),
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("a", OpConst64, TypeInt64, 26, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 26, nil),
Exit("mem"))),
},
// value aux different
{
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("a", OpConst64, TypeInt64, 0, 14),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 0, 14),
Exit("mem"))),
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("a", OpConst64, TypeInt64, 0, 26),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 0, 26),
Exit("mem"))),
},
// value args different
{
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("a", OpConst64, TypeInt64, 14, nil),
- Valu("b", OpConst64, TypeInt64, 26, nil),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
Exit("mem"))),
cfg.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("a", OpConst64, TypeInt64, 0, nil),
- Valu("b", OpConst64, TypeInt64, 14, nil),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "b", "a"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 0, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "b", "a"),
Exit("mem"))),
},
}
@@ -434,14 +435,14 @@ func TestConstCache(t *testing.T) {
c := testConfig(t)
f := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
Exit("mem")))
- v1 := f.f.ConstBool(src.NoXPos, TypeBool, false)
- v2 := f.f.ConstBool(src.NoXPos, TypeBool, true)
+ v1 := f.f.ConstBool(src.NoXPos, c.config.Types.Bool, false)
+ v2 := f.f.ConstBool(src.NoXPos, c.config.Types.Bool, true)
f.f.freeValue(v1)
f.f.freeValue(v2)
- v3 := f.f.ConstBool(src.NoXPos, TypeBool, false)
- v4 := f.f.ConstBool(src.NoXPos, TypeBool, true)
+ v3 := f.f.ConstBool(src.NoXPos, c.config.Types.Bool, false)
+ v4 := f.f.ConstBool(src.NoXPos, c.config.Types.Bool, true)
if v3.AuxInt != 0 {
t.Errorf("expected %s to have auxint of 0\n", v3.LongString())
}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
index 3a0aecc21c..beae15af48 100644
--- a/src/cmd/compile/internal/ssa/fuse_test.go
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -1,23 +1,24 @@
package ssa
import (
+ "cmd/compile/internal/types"
"fmt"
"strconv"
"testing"
)
func TestFuseEliminatesOneBranch(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("nilptr", OpConstNil, ptrType, 0, nil),
- Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
If("bool1", "then", "exit")),
Bloc("then",
Goto("exit")),
@@ -35,17 +36,17 @@ func TestFuseEliminatesOneBranch(t *testing.T) {
}
func TestFuseEliminatesBothBranches(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("nilptr", OpConstNil, ptrType, 0, nil),
- Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
If("bool1", "then", "else")),
Bloc("then",
Goto("exit")),
@@ -68,17 +69,17 @@ func TestFuseEliminatesBothBranches(t *testing.T) {
}
func TestFuseHandlesPhis(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("nilptr", OpConstNil, ptrType, 0, nil),
- Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
If("bool1", "then", "else")),
Bloc("then",
Goto("exit")),
@@ -105,8 +106,8 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("z0")),
Bloc("z1",
Goto("z2")),
@@ -138,9 +139,9 @@ func BenchmarkFuse(b *testing.B) {
blocks := make([]bloc, 0, 2*n+3)
blocks = append(blocks,
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("cond", OpArg, TypeBool, 0, nil),
- Valu("x", OpArg, TypeInt64, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, nil),
Goto("exit")))
phiArgs := make([]string, 0, 2*n)
@@ -153,7 +154,7 @@ func BenchmarkFuse(b *testing.B) {
}
blocks = append(blocks,
Bloc("merge",
- Valu("phi", OpPhi, TypeMem, 0, nil, phiArgs...),
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, phiArgs...),
Goto("exit")),
Bloc("exit",
Exit("mem")))
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index c67796ea09..15e2a0606c 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -68,8 +68,8 @@
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGL x)
(Neg8 x) -> (NEGL x)
-(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
(Neg32F x) && config.use387 -> (FCHS x)
(Neg64F x) && config.use387 -> (FCHS x)
@@ -256,12 +256,12 @@
// Lowering stores
// These more-specific FP versions of the Store pattern should come first.
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 -> (MOVLstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
// Lowering moves
(Move [0] _ _ mem) -> mem
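Aside for readers new to the rules DSL: {t} binds the value's Aux field, which is an interface{} in the generated matcher; that is why the guards above now assert t.(*types.Type). A hand-written sketch of roughly what the generator emits for the first MOVSDstore rule (not actual generator output):

	// match:  (Store {t} ptr val mem)
	// cond:   t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	func rewriteStoreSketch(v *Value) bool {
		for {
			if v.Op != OpStore {
				break
			}
			t := v.Aux
			ptr, val, mem := v.Args[0], v.Args[1], v.Args[2]
			if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
				break
			}
			v.reset(Op386MOVSDstore)
			v.AddArg(ptr)
			v.AddArg(val)
			v.AddArg(mem)
			return true
		}
		return false
	}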
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index f1cc31ad0b..5543395404 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -78,8 +78,8 @@
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGL x)
(Neg8 x) -> (NEGL x)
-(Neg32F x) -> (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) -> (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
(Com64 x) -> (NOTQ x)
(Com32 x) -> (NOTL x)
@@ -97,19 +97,19 @@
(OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)
// Lowering other arithmetic
-(Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
-(Ctz32 x) -> (Select0 (BSFQ (ORQ <types.UInt64> (MOVQconst [1<<32]) x)))
+(Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
+(Ctz32 x) -> (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
-(BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))
-(BitLen32 x) -> (BitLen64 (MOVLQZX <types.UInt64> x))
+(BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
+(BitLen32 x) -> (BitLen64 (MOVLQZX <typ.UInt64> x))
(Bswap64 x) -> (BSWAPQ x)
(Bswap32 x) -> (BSWAPL x)
(PopCount64 x) -> (POPCNTQ x)
(PopCount32 x) -> (POPCNTL x)
-(PopCount16 x) -> (POPCNTL (MOVWQZX <types.UInt32> x))
-(PopCount8 x) -> (POPCNTL (MOVBQZX <types.UInt32> x))
+(PopCount16 x) -> (POPCNTL (MOVWQZX <typ.UInt32> x))
+(PopCount8 x) -> (POPCNTL (MOVBQZX <typ.UInt32> x))
(Sqrt x) -> (SQRTSD x)
@@ -305,13 +305,13 @@
// Lowering stores
// These more-specific FP versions of the Store pattern should come first.
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 -> (MOVQstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 -> (MOVLstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 -> (MOVQstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
// Lowering moves
(Move [0] _ _ mem) -> mem
@@ -477,10 +477,10 @@
// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
-(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(types.UInt32,TypeMem)> val ptr mem))
-(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(types.UInt64,TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
+(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
+(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
// Atomic exchanges.
(AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
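Aside: the atomic-store rules above spell tuple types as types.NewTuple(elem, types.TypeMem); the XCHG result is a (value, memory) pair that Select0/Select1 project. A minimal sketch of building one, assuming tuple parts are read back with FieldType as elsewhere in this CL:

	func tupleSketch() *types.Type {
		tup := types.NewTuple(types.Types[types.TUINT32], types.TypeMem)
		_ = tup.FieldType(0) // the uint32 half, projected by Select0
		_ = tup.FieldType(1) // the memory half, projected by Select1
		return tup
	}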
@@ -566,8 +566,8 @@
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no)
// Disabled because it interferes with the pattern match above and makes worse code.
-// (SETNEF x) -> (ORQ (SETNE <types.Int8> x) (SETNAN <types.Int8> x))
-// (SETEQF x) -> (ANDQ (SETEQ <types.Int8> x) (SETORD <types.Int8> x))
+// (SETNEF x) -> (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
+// (SETEQF x) -> (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))
// fold constants into instructions
(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
@@ -1898,7 +1898,7 @@
&& clobber(s0)
&& clobber(s1)
&& clobber(or)
- -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
(ORQ
s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))
@@ -1919,7 +1919,7 @@
&& clobber(s0)
&& clobber(s1)
&& clobber(or)
- -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
(ORQ
s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))
@@ -1944,7 +1944,7 @@
&& clobber(s0)
&& clobber(s1)
&& clobber(or)
- -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
+ -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
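
The identity behind these merge rules, checked in plain Go (an illustrative test, not compiler code): assembling bytes in big-endian order equals a byte-swapped little-endian wide load, and ROLWconst [8] is exactly the 16-bit byte swap.

    package main

    import "math/bits"

    func main() {
        b := []byte{0x12, 0x34}
        big := uint16(b[0])<<8 | uint16(b[1])    // two byte loads, shifted and ORed
        little := uint16(b[0]) | uint16(b[1])<<8 // one little-endian MOVWload
        if big != bits.ReverseBytes16(little) {  // ROLWconst [8] == byte swap
            panic("merge identity violated")
        }
    }
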
// Big-endian indexed loads
@@ -2044,7 +2044,7 @@
&& clobber(s0)
&& clobber(s1)
&& clobber(or)
- -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ -> @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
(ORQ
s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))
@@ -2065,7 +2065,7 @@
&& clobber(s0)
&& clobber(s1)
&& clobber(or)
- -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
(ORQ
s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))
@@ -2090,7 +2090,7 @@
&& clobber(s0)
&& clobber(s1)
&& clobber(or)
- -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ -> @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
// Combine 2 byte stores + shift into rolw 8 + word store
(MOVBstore [i] {s} p w
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index e24cede540..e92c58b7d8 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -34,12 +34,12 @@
(Mul32uhilo x y) -> (MULLU x y)
(Div32 x y) ->
- (SUB (XOR <types.UInt32> // negate the result if one operand is negative
- (Select0 <types.UInt32> (CALLudiv
- (SUB <types.UInt32> (XOR x <types.UInt32> (Signmask x)) (Signmask x)) // negate x if negative
- (SUB <types.UInt32> (XOR y <types.UInt32> (Signmask y)) (Signmask y)))) // negate y if negative
- (Signmask (XOR <types.UInt32> x y))) (Signmask (XOR <types.UInt32> x y)))
-(Div32u x y) -> (Select0 <types.UInt32> (CALLudiv x y))
+ (SUB (XOR <typ.UInt32> // negate the result if one operand is negative
+ (Select0 <typ.UInt32> (CALLudiv
+ (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) // negate y if negative
+ (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
+(Div32u x y) -> (Select0 <typ.UInt32> (CALLudiv x y))
(Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y))
@@ -48,12 +48,12 @@
(Div64F x y) -> (DIVD x y)
(Mod32 x y) ->
- (SUB (XOR <types.UInt32> // negate the result if x is negative
- (Select1 <types.UInt32> (CALLudiv
- (SUB <types.UInt32> (XOR <types.UInt32> x (Signmask x)) (Signmask x)) // negate x if negative
- (SUB <types.UInt32> (XOR <types.UInt32> y (Signmask y)) (Signmask y)))) // negate y if negative
+ (SUB (XOR <typ.UInt32> // negate the result if x is negative
+ (Select1 <typ.UInt32> (CALLudiv
+ (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) // negate y if negative
(Signmask x)) (Signmask x))
-(Mod32u x y) -> (Select1 <types.UInt32> (CALLudiv x y))
+(Mod32u x y) -> (Select1 <typ.UInt32> (CALLudiv x y))
(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
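
A Go sketch of the sign-fixup scheme in the Div32 and Mod32 rules above (div32 is an illustrative name): divide the magnitudes with the unsigned helper, then conditionally negate using sign masks, where (v ^ s) - s negates v exactly when the mask s is -1.

    package sketch

    func div32(x, y int32) int32 {
        sx, sy := x>>31, y>>31      // Signmask: 0 for non-negative, -1 for negative
        ux := uint32((x ^ sx) - sx) // |x|
        uy := uint32((y ^ sy) - sy) // |y|
        q := int32(ux / uy)         // Select0 of CALLudiv
        s := (x ^ y) >> 31          // quotient is negative iff signs differ
        return (q ^ s) - s
    }
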
@@ -117,7 +117,7 @@
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
-(EqB x y) -> (XORconst [1] (XOR <types.Bool> x y))
+(EqB x y) -> (XORconst [1] (XOR <typ.Bool> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
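
With the 0/1 representation noted above, the EqB rule is just this identity (eqb is an illustrative name):

    package sketch

    func eqb(x, y uint8) uint8 { // x, y are 0 or 1
        return 1 ^ (x ^ y) // 1 when equal, 0 when different
    }
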
@@ -166,11 +166,11 @@
(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
@@ -182,8 +182,8 @@
// large constant signed right shift, we leave the sign bit
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
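
What these two rules compute, in Go terms (rsh16Large is an illustrative name): shifting the 16-bit value left by 16 places its sign bit at bit 31, and the arithmetic shift right by 31 then smears it, so an oversized signed shift yields 0 or -1 exactly as x >> 15 would.

    package sketch

    func rsh16Large(x int16) int32 {
        return (int32(x) << 16) >> 31 // 0 for x >= 0, -1 for x < 0
    }
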
// constants
(Const8 [val]) -> (MOVWconst [val])
@@ -210,7 +210,7 @@
(SignExt16to32 x) -> (MOVHreg x)
(Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (SRAconst (RSBshiftRL <types.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
+(Zeromask x) -> (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
(Slicemask <t> x) -> (SRAconst (RSBconst <t> [0] x) [31])
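
The Zeromask trick above in plain Go (zeromask is an illustrative name): for any nonzero x, uint32(x)>>1 - x wraps to a value with the sign bit set, so the arithmetic shift produces an all-ones mask; for zero it stays zero.

    package sketch

    func zeromask(x uint32) int32 {
        return int32(x>>1-x) >> 31 // 0 if x == 0, -1 otherwise
    }
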
// float <-> int conversion
@@ -299,23 +299,23 @@
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
// zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore ptr (MOVWconst [0]) mem)
(Zero [2] ptr mem) ->
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore ptr (MOVWconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] ptr mem) ->
@@ -333,29 +333,29 @@
// 4 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%4 == 0 && s > 4 && s <= 512
- && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice ->
+ && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice ->
(DUFFZERO [4 * (128 - int64(s/4))] ptr (MOVWconst [0]) mem)
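
A sketch of how that DUFFZERO argument is derived (duffZeroOffset is an illustrative name; the layout assumption, 128 store instructions of 4 bytes each, is the one documented in runtime/mkduff.go): zeroing s bytes needs only s/4 of the stores, so the rule jumps past the rest.

    package sketch

    func duffZeroOffset(s int64) int64 {
        return 4 * (128 - s/4) // byte offset of the first store still needed
    }
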
// Large zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0 ->
- (LoweredZero [t.(Type).Alignment()]
+ && (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0 ->
+ (LoweredZero [t.(*types.Type).Alignment()]
ptr
- (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)])
(MOVWconst [0])
mem)
// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
-(Move [2] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore dst (MOVHUload src mem) mem)
(Move [2] dst src mem) ->
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem))
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [2] dst (MOVHUload [2] src mem)
(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) ->
@@ -373,16 +373,16 @@
// 8 and 128 are magic constants, see runtime/mkduff.go
(Move [s] {t} dst src mem)
&& s%4 == 0 && s > 4 && s <= 512
- && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice ->
+ && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice ->
(DUFFCOPY [8 * (128 - int64(s/4))] dst src mem)
// Large move uses a loop
(Move [s] {t} dst src mem)
- && (s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0 ->
- (LoweredMove [t.(Type).Alignment()]
+ && (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0 ->
+ (LoweredMove [t.(*types.Type).Alignment()]
dst
src
- (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)])
mem)
// calls
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 110ca8c3b1..5b2b462220 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -27,8 +27,8 @@
(Hmul64 x y) -> (MULH x y)
(Hmul64u x y) -> (UMULH x y)
-(Hmul32 x y) -> (SRAconst (MULL <types.Int64> x y) [32])
-(Hmul32u x y) -> (SRAconst (UMULL <types.UInt64> x y) [32])
+(Hmul32 x y) -> (SRAconst (MULL <typ.Int64> x y) [32])
+(Hmul32u x y) -> (SRAconst (UMULL <typ.UInt64> x y) [32])
(Div64 x y) -> (DIV x y)
(Div64u x y) -> (UDIV x y)
@@ -86,20 +86,20 @@
(Ctz64 <t> x) -> (CLZ (RBIT <t> x))
(Ctz32 <t> x) -> (CLZW (RBITW <t> x))
-(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <types.Int> x))
+(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
(Bswap64 x) -> (REV x)
(Bswap32 x) -> (REVW x)
(BitRev64 x) -> (RBIT x)
(BitRev32 x) -> (RBITW x)
-(BitRev16 x) -> (SRLconst [48] (RBIT <types.UInt64> x))
-(BitRev8 x) -> (SRLconst [56] (RBIT <types.UInt64> x))
+(BitRev16 x) -> (SRLconst [48] (RBIT <typ.UInt64> x))
+(BitRev8 x) -> (SRLconst [56] (RBIT <typ.UInt64> x))
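
The BitRev16 and BitRev8 rules above use this identity, shown with math/bits (bitRev16 is an illustrative name): reversing within 64 bits parks the reversed narrow value in the top bits, and the logical shift brings it back down.

    package sketch

    import "math/bits"

    func bitRev16(x uint16) uint16 {
        return uint16(bits.Reverse64(uint64(x)) >> 48) // SRLconst [48] (RBIT x)
    }
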
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
-(EqB x y) -> (XOR (MOVDconst [1]) (XOR <types.Bool> x y))
+(EqB x y) -> (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XOR (MOVDconst [1]) x)
@@ -338,12 +338,12 @@
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
// zeroing
(Zero [0] _ mem) -> mem
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index 5124daa48d..bb09d2334b 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -10,7 +10,7 @@
(Add64F x y) -> (ADDD x y)
(Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
-(Select1 (Add32carry <t> x y)) -> (SGTU <types.Bool> x (ADD <t.FieldType(0)> x y))
+(Select1 (Add32carry <t> x y)) -> (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
(Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
(SubPtr x y) -> (SUB x y)
@@ -21,7 +21,7 @@
(Sub64F x y) -> (SUBD x y)
(Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
-(Select1 (Sub32carry <t> x y)) -> (SGTU <types.Bool> (SUB <t.FieldType(0)> x y) x)
+(Select1 (Sub32carry <t> x y)) -> (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
(Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
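
How the two Select1 rules above materialize carry and borrow without a flags register, sketched in Go (carryBorrow is an illustrative name): a 32-bit addition carries iff x > x+y after wrapping, and a subtraction borrows iff x-y wraps above x.

    package sketch

    func carryBorrow(x, y uint32) (carry, borrow bool) {
        carry = x > x+y  // SGTU x (ADD x y)
        borrow = x-y > x // SGTU (SUB x y) x
        return
    }
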
(Mul32 x y) -> (MUL x y)
@@ -72,11 +72,11 @@
(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
@@ -88,8 +88,8 @@
// large constant signed right shift, we leave the sign bit
(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
// shifts
// hardware instruction uses only the low 5 bits of the shift
@@ -118,17 +118,17 @@
(Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-(Rsh32x32 x y) -> (SRA x ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh32x16 x y) -> (SRA x ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh32x8 x y) -> (SRA x ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh32x32 x y) -> (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh32x16 x y) -> (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) -> (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
// unary ops
(Neg32 x) -> (NEG x)
@@ -153,7 +153,7 @@
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
-(EqB x y) -> (XORconst [1] (XOR <types.Bool> x y))
+(EqB x y) -> (XORconst [1] (XOR <typ.Bool> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
@@ -267,23 +267,23 @@
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
// zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore ptr (MOVWconst [0]) mem)
(Zero [2] ptr mem) ->
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore ptr (MOVWconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] ptr mem) ->
@@ -295,18 +295,18 @@
(MOVBstore [2] ptr (MOVWconst [0])
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [6] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [6] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [4] ptr (MOVWconst [0])
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [8] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore [0] ptr (MOVWconst [0]) mem))
-(Zero [12] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [12] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [8] ptr (MOVWconst [0])
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [16] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [16] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [12] ptr (MOVWconst [0])
(MOVWstore [8] ptr (MOVWconst [0])
(MOVWstore [4] ptr (MOVWconst [0])
@@ -314,23 +314,23 @@
// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 16 || t.(Type).Alignment()%4 != 0) ->
- (LoweredZero [t.(Type).Alignment()]
+ && (s > 16 || t.(*types.Type).Alignment()%4 != 0) ->
+ (LoweredZero [t.(*types.Type).Alignment()]
ptr
- (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)])
mem)
// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
-(Move [2] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore dst (MOVHUload src mem) mem)
(Move [2] dst src mem) ->
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem))
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [2] dst (MOVHUload [2] src mem)
(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) ->
@@ -342,23 +342,23 @@
(MOVBstore [2] dst (MOVBUload [2] src mem)
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem)))
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [6] dst (MOVHload [6] src mem)
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))))
-(Move [6] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [6] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem)))
-(Move [12] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [12] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem)))
-(Move [16] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [16] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [12] dst (MOVWload [12] src mem)
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
@@ -367,11 +367,11 @@
// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
- && (s > 16 || t.(Type).Alignment()%4 != 0) ->
- (LoweredMove [t.(Type).Alignment()]
+ && (s > 16 || t.(*types.Type).Alignment()%4 != 0) ->
+ (LoweredMove [t.(*types.Type).Alignment()]
dst
src
- (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)])
mem)
// calls
@@ -393,40 +393,40 @@
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian ->
- (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr)
- (SLL <types.UInt32> (ZeroExt8to32 val)
- (SLLconst <types.UInt32> [3]
- (ANDconst <types.UInt32> [3] ptr))) mem)
+ (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr))) mem)
// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
(AtomicAnd8 ptr val mem) && !config.BigEndian ->
- (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr)
- (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val)
- (SLLconst <types.UInt32> [3]
- (ANDconst <types.UInt32> [3] ptr)))
- (NORconst [0] <types.UInt32> (SLL <types.UInt32>
- (MOVWconst [0xff]) (SLLconst <types.UInt32> [3]
- (ANDconst <types.UInt32> [3] ptr))))) mem)
+ (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr)))
+ (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr))))) mem)
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian ->
- (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr)
- (SLL <types.UInt32> (ZeroExt8to32 val)
- (SLLconst <types.UInt32> [3]
- (ANDconst <types.UInt32> [3]
- (XORconst <types.UInt32> [3] ptr)))) mem)
+ (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr)))) mem)
// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
(AtomicAnd8 ptr val mem) && config.BigEndian ->
- (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr)
- (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val)
- (SLLconst <types.UInt32> [3]
- (ANDconst <types.UInt32> [3]
- (XORconst <types.UInt32> [3] ptr))))
- (NORconst [0] <types.UInt32> (SLL <types.UInt32>
- (MOVWconst [0xff]) (SLLconst <types.UInt32> [3]
- (ANDconst <types.UInt32> [3]
- (XORconst <types.UInt32> [3] ptr)))))) mem)
+ (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr))))
+ (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr)))))) mem)
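
A Go sketch of the little-endian AtomicOr8 lowering above (atomicOr8LE is an illustrative name, and the CAS loop stands in for LoweredAtomicOr): the byte's bits are shifted into position within the aligned 32-bit word that contains it, mirroring the ptr&^3 and (ptr&3)*8 computations in the rule.

    package sketch

    import "sync/atomic"

    func atomicOr8LE(word *uint32, off uintptr, val uint8) {
        mask := uint32(val) << ((off & 3) * 8) // bit position of the byte in its word
        for {
            old := atomic.LoadUint32(word)
            if atomic.CompareAndSwapUint32(word, old, old|mask) {
                return
            }
        }
    }
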
// checks
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index 6dd5461f1f..2a3a9c2018 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -27,8 +27,8 @@
(Hmul64 x y) -> (Select0 (MULV x y))
(Hmul64u x y) -> (Select0 (MULVU x y))
-(Hmul32 x y) -> (SRAVconst (Select1 <types.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
-(Hmul32u x y) -> (SRLVconst (Select1 <types.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+(Hmul32 x y) -> (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) -> (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
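
The Hmul32 rules above take the high half of the widened product; in Go terms (hmul32 is an illustrative name):

    package sketch

    func hmul32(x, y int32) int32 {
        return int32((int64(x) * int64(y)) >> 32) // sign-extend, multiply, keep high 32 bits
    }
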
(Div64 x y) -> (Select1 (DIVV x y))
(Div64u x y) -> (Select1 (DIVVU x y))
@@ -71,65 +71,65 @@
// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
-(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
-(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
-(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
-(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
-(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> x y))
-(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
-(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
-(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
-
-(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
-(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
-(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
-
-(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
-(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
-(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
-
-(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
-(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
-(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
-(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
-
-(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
-(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
-
-(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
-(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
-
-(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
-(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
-
-(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
-(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
-(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
-(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
+(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SRLV <t> x y))
+(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <typ.UInt64> [63]))) y))
+(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <typ.UInt64> [63]))) y))
+(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <typ.UInt64> [63]))) y))
+(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <typ.UInt64> [63]))) y))
+(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt8to64 y)))
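
What this block of shift rules encodes, sketched in Go (lsh64 is an illustrative name): SLLV uses only the low 6 bits of the count, and the SGTU/NEGV pair builds an all-ones mask only when the count is in range, so oversized shifts yield 0 as Go requires.

    package sketch

    func lsh64(x, y uint64) uint64 {
        var mask uint64
        if y < 64 { // SGTU (Const64 [64]) y, smeared into a mask by NEGV
            mask = ^uint64(0)
        }
        return (x << (y & 63)) & mask // hardware shift, then Go semantics
    }
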
// unary ops
(Neg64 x) -> (NEGV x)
@@ -147,7 +147,7 @@
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
-(EqB x y) -> (XOR (MOVVconst [1]) (XOR <types.Bool> x y))
+(EqB x y) -> (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
@@ -285,24 +285,24 @@
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVVstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVVstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
// zeroing
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVVconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) ->
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore ptr (MOVVconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [2] ptr (MOVVconst [0])
(MOVHstore [0] ptr (MOVVconst [0]) mem))
(Zero [4] ptr mem) ->
@@ -310,12 +310,12 @@
(MOVBstore [2] ptr (MOVVconst [0])
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem))))
-(Zero [8] {t} ptr mem) && t.(Type).Alignment()%8 == 0 ->
+(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 ->
(MOVVstore ptr (MOVVconst [0]) mem)
-(Zero [8] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [4] ptr (MOVVconst [0])
(MOVWstore [0] ptr (MOVVconst [0]) mem))
-(Zero [8] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [6] ptr (MOVVconst [0])
(MOVHstore [4] ptr (MOVVconst [0])
(MOVHstore [2] ptr (MOVVconst [0])
@@ -325,18 +325,18 @@
(MOVBstore [2] ptr (MOVVconst [0])
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [6] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [6] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [4] ptr (MOVVconst [0])
(MOVHstore [2] ptr (MOVVconst [0])
(MOVHstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [12] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [12] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [8] ptr (MOVVconst [0])
(MOVWstore [4] ptr (MOVVconst [0])
(MOVWstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [16] {t} ptr mem) && t.(Type).Alignment()%8 == 0 ->
+(Zero [16] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 ->
(MOVVstore [8] ptr (MOVVconst [0])
(MOVVstore [0] ptr (MOVVconst [0]) mem))
-(Zero [24] {t} ptr mem) && t.(Type).Alignment()%8 == 0 ->
+(Zero [24] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 ->
(MOVVstore [16] ptr (MOVVconst [0])
(MOVVstore [8] ptr (MOVVconst [0])
(MOVVstore [0] ptr (MOVVconst [0]) mem)))
@@ -345,28 +345,28 @@
// 8, and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%8 == 0 && s > 24 && s <= 8*128
- && t.(Type).Alignment()%8 == 0 && !config.noDuffDevice ->
+ && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice ->
(DUFFZERO [8 * (128 - int64(s/8))] ptr mem)
// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 8*128 || config.noDuffDevice) || t.(Type).Alignment()%8 != 0 ->
- (LoweredZero [t.(Type).Alignment()]
+ && (s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0 ->
+ (LoweredZero [t.(*types.Type).Alignment()]
ptr
- (ADDVconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDVconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)])
mem)
// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
-(Move [2] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) ->
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem))
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) ->
@@ -374,12 +374,12 @@
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem))))
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%8 == 0 ->
+(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 ->
(MOVVstore dst (MOVVload src mem) mem)
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [6] dst (MOVHload [6] src mem)
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
@@ -389,29 +389,29 @@
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem)))
-(Move [6] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [6] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem)))
-(Move [12] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [12] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem)))
-(Move [16] {t} dst src mem) && t.(Type).Alignment()%8 == 0 ->
+(Move [16] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 ->
(MOVVstore [8] dst (MOVVload [8] src mem)
(MOVVstore dst (MOVVload src mem) mem))
-(Move [24] {t} dst src mem) && t.(Type).Alignment()%8 == 0 ->
+(Move [24] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 ->
(MOVVstore [16] dst (MOVVload [16] src mem)
(MOVVstore [8] dst (MOVVload [8] src mem)
(MOVVstore dst (MOVVload src mem) mem)))
// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
- && s > 24 || t.(Type).Alignment()%8 != 0 ->
- (LoweredMove [t.(Type).Alignment()]
+ && s > 24 || t.(*types.Type).Alignment()%8 != 0 ->
+ (LoweredMove [t.(*types.Type).Alignment()]
dst
src
- (ADDVconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDVconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)])
mem)
// calls
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index 90a574841d..3caeda2488 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -163,72 +163,72 @@
(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
-(Rsh64x64 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-(Rsh64Ux64 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-(Lsh64x64 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Rsh64x64 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Rsh64Ux64 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Lsh64x64 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-(Rsh32x64 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-(Rsh32Ux64 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-(Lsh32x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Rsh32x64 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Rsh32Ux64 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Lsh32x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-(Lsh16x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Lsh16x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-(Lsh8x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Lsh8x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-(Rsh64x32 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-(Rsh64Ux32 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-(Lsh64x32 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Rsh64x32 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Rsh64Ux32 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Lsh64x32 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-(Rsh32x32 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-(Rsh32Ux32 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-(Lsh32x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Rsh32x32 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Rsh32Ux32 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Lsh32x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-(Lsh16x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Lsh16x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-(Lsh8x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Lsh8x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-(Rsh64x16 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-(Rsh64Ux16 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-(Lsh64x16 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Rsh64x16 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Rsh64Ux16 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Lsh64x16 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-(Rsh32x16 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-(Rsh32Ux16 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-(Lsh32x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Rsh32x16 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Rsh32Ux16 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Lsh32x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-(Lsh16x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Lsh16x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-(Lsh8x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Lsh8x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-(Rsh64x8 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-(Rsh64Ux8 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-(Lsh64x8 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Rsh64x8 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Rsh64Ux8 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Lsh64x8 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-(Rsh32x8 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-(Rsh32Ux8 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-(Lsh32x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Rsh32x8 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Rsh32Ux8 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Lsh32x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-(Lsh16x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Lsh16x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
-(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
-(Lsh8x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Lsh8x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
// Cleaning up shift ops when input is masked
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
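For intuition, a standalone Go sketch of the side condition (assuming the PPC64 convention that ADDconstForCarry [c] carries exactly when its input is at least -c; the constants below are illustrative):

    package main

    import "fmt"

    func main() {
        const c, d = -16, 15 // shift limit 16, amount pre-masked by ANDconst [15]
        carryPossible := false
        for y := int64(0); y <= d; y++ { // every value the masked operand can take
            if y >= -c { // ADDconstForCarry [c] would carry here
                carryPossible = true
            }
        }
        fmt.Println(carryPossible) // false: MaskIfNotCarry is always -1
    }

With c < 0, d > 0, and c+d < 0, no masked value reaches -c, so the mask folds to the constant -1.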
@@ -242,7 +242,7 @@
(Addr {sym} base) -> (MOVDaddr {sym} base)
// (Addr {sym} base) -> (ADDconst {sym} base)
-(OffPtr [off] ptr) -> (ADD (MOVDconst <types.Int64> [off]) ptr)
+(OffPtr [off] ptr) -> (ADD (MOVDconst <typ.Int64> [off]) ptr)
(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
@@ -486,13 +486,13 @@
(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
@@ -573,7 +573,7 @@
(Move [4] dst src mem) ->
(MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store must have offsets that are multiple of 4
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
(MOVDstore dst (MOVDload src mem) mem)
(Move [8] dst src mem) ->
(MOVWstore [4] dst (MOVWZload [4] src mem)
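In effect (a pure-Go model, not compiler code): an 8-byte move splits into two word-sized moves whenever 4-byte alignment cannot be proven, since MOVD offsets must be multiples of 4 per the comment above.

    // copy8 models the two lowerings of (Move [8]) shown above.
    func copy8(dst, src []byte, aligned4 bool) {
        if aligned4 {
            copy(dst[:8], src[:8]) // one MOVDload/MOVDstore pair
            return
        }
        copy(dst[4:8], src[4:8]) // MOVWstore [4] (MOVWZload [4] ...)
        copy(dst[:4], src[:4])   // MOVWstore (MOVWZload ...)
    }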
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index f54169de58..aed4f5cd71 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -322,13 +322,13 @@
// Lowering stores
// These more-specific FP versions of the Store pattern should come first.
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 -> (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
// Lowering moves
@@ -437,7 +437,7 @@
(If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
(If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)
-(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <types.Bool> cond)) yes no)
+(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <typ.Bool> cond)) yes no)
// ***************************
// Above: lowering rules
@@ -446,8 +446,8 @@
// TODO: Should the optimizations be a separate pass?
// Fold unnecessary type conversions.
-(MOVDreg <t> x) && t.Compare(x.Type) == CMPeq -> x
-(MOVDnop <t> x) && t.Compare(x.Type) == CMPeq -> x
+(MOVDreg <t> x) && t.Compare(x.Type) == types.CMPeq -> x
+(MOVDnop <t> x) && t.Compare(x.Type) == types.CMPeq -> x
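Compare is the three-valued ordering now provided by package types; a toy stand-in (the real method orders on many more type properties than width) shows the shape of the test:

    type Cmp int8 // toy mirror of types.Cmp

    const (
        CMPlt Cmp = -1
        CMPeq Cmp = 0
        CMPgt Cmp = 1
    )

    // cmpWidth stands in for one facet of (*types.Type).Compare.
    func cmpWidth(a, b int64) Cmp {
        switch {
        case a < b:
            return CMPlt
        case a > b:
            return CMPgt
        }
        return CMPeq // identical: the MOVDreg/MOVDnop conversion can be dropped
    }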
// Propagate constants through type conversions.
(MOVDreg (MOVDconst [c])) -> (MOVDconst [c])
diff --git a/src/cmd/compile/internal/ssa/gen/dec.rules b/src/cmd/compile/internal/ssa/gen/dec.rules
index 377edba724..a475a2d26a 100644
--- a/src/cmd/compile/internal/ssa/gen/dec.rules
+++ b/src/cmd/compile/internal/ssa/gen/dec.rules
@@ -13,28 +13,28 @@
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 ->
(ComplexMake
- (Load <types.Float32> ptr mem)
- (Load <types.Float32>
- (OffPtr <types.Float32Ptr> [4] ptr)
+ (Load <typ.Float32> ptr mem)
+ (Load <typ.Float32>
+ (OffPtr <typ.Float32Ptr> [4] ptr)
mem)
)
-(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 8 ->
- (Store {types.Float32}
- (OffPtr <types.Float32Ptr> [4] dst)
+(Store {t} dst (ComplexMake real imag) mem) && t.(*types.Type).Size() == 8 ->
+ (Store {typ.Float32}
+ (OffPtr <typ.Float32Ptr> [4] dst)
imag
- (Store {types.Float32} dst real mem))
+ (Store {typ.Float32} dst real mem))
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
(ComplexMake
- (Load <types.Float64> ptr mem)
- (Load <types.Float64>
- (OffPtr <types.Float64Ptr> [8] ptr)
+ (Load <typ.Float64> ptr mem)
+ (Load <typ.Float64>
+ (OffPtr <typ.Float64Ptr> [8] ptr)
mem)
)
-(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 16 ->
- (Store {types.Float64}
- (OffPtr <types.Float64Ptr> [8] dst)
+(Store {t} dst (ComplexMake real imag) mem) && t.(*types.Type).Size() == 16 ->
+ (Store {typ.Float64}
+ (OffPtr <typ.Float64Ptr> [8] dst)
imag
- (Store {types.Float64} dst real mem))
+ (Store {typ.Float64} dst real mem))
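The offsets above encode the de facto complex layout: real part first, imaginary part in the second half. A quick pure-Go check:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        c := complex(float32(1), float32(2)) // complex64, 8 bytes
        p := (*[2]float32)(unsafe.Pointer(&c))
        fmt.Println(p[0], p[1]) // 1 2: real at offset 0, imag at offset 4
    }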
// string ops
(StringPtr (StringMake ptr _)) -> ptr
@@ -42,15 +42,15 @@
(Load <t> ptr mem) && t.IsString() ->
(StringMake
- (Load <types.BytePtr> ptr mem)
- (Load <types.Int>
- (OffPtr <types.IntPtr> [config.PtrSize] ptr)
+ (Load <typ.BytePtr> ptr mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
mem))
(Store dst (StringMake ptr len) mem) ->
- (Store {types.Int}
- (OffPtr <types.IntPtr> [config.PtrSize] dst)
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [config.PtrSize] dst)
len
- (Store {types.BytePtr} dst ptr mem))
+ (Store {typ.BytePtr} dst ptr mem))
// slice ops
(SlicePtr (SliceMake ptr _ _ )) -> ptr
@@ -60,20 +60,20 @@
(Load <t> ptr mem) && t.IsSlice() ->
(SliceMake
(Load <t.ElemType().PtrTo()> ptr mem)
- (Load <types.Int>
- (OffPtr <types.IntPtr> [config.PtrSize] ptr)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
mem)
- (Load <types.Int>
- (OffPtr <types.IntPtr> [2*config.PtrSize] ptr)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
mem))
(Store dst (SliceMake ptr len cap) mem) ->
- (Store {types.Int}
- (OffPtr <types.IntPtr> [2*config.PtrSize] dst)
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] dst)
cap
- (Store {types.Int}
- (OffPtr <types.IntPtr> [config.PtrSize] dst)
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [config.PtrSize] dst)
len
- (Store {types.BytePtr} dst ptr mem)))
+ (Store {typ.BytePtr} dst ptr mem)))
// interface ops
(ITab (IMake itab _)) -> itab
@@ -81,12 +81,12 @@
(Load <t> ptr mem) && t.IsInterface() ->
(IMake
- (Load <types.BytePtr> ptr mem)
- (Load <types.BytePtr>
- (OffPtr <types.BytePtrPtr> [config.PtrSize] ptr)
+ (Load <typ.BytePtr> ptr mem)
+ (Load <typ.BytePtr>
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr)
mem))
(Store dst (IMake itab data) mem) ->
- (Store {types.BytePtr}
- (OffPtr <types.BytePtrPtr> [config.PtrSize] dst)
+ (Store {typ.BytePtr}
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst)
data
- (Store {types.Uintptr} dst itab mem))
+ (Store {typ.Uintptr} dst itab mem))
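All three header layouts these rules encode ({ptr, len} strings, {ptr, len, cap} slices, {itab, data} interfaces) can be confirmed in ordinary Go; sizes below are for a 64-bit target, where PtrSize is 8:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var s string      // {data ptr, len}: len at offset PtrSize
        var sl []int      // {data ptr, len, cap}: cap at offset 2*PtrSize
        var i interface{} // {itab, data}: data at offset PtrSize
        fmt.Println(unsafe.Sizeof(s), unsafe.Sizeof(sl), unsafe.Sizeof(i)) // 16 24 16
    }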
diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules
index ea7b95165f..b9ac3de313 100644
--- a/src/cmd/compile/internal/ssa/gen/dec64.rules
+++ b/src/cmd/compile/internal/ssa/gen/dec64.rules
@@ -4,7 +4,7 @@
// This file contains rules to decompose [u]int64 types on 32-bit
// architectures. These rules work together with the decomposeBuiltIn
-// pass which handles phis of these types.
+// pass which handles phis of these types.
(Int64Hi (Int64Make hi _)) -> hi
(Int64Lo (Int64Make _ lo)) -> lo
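Concretely, the decomposition models an int64 x as the pair (hi, lo) = (int32(x>>32), uint32(x)), with Int64Make as the inverse. A self-contained round-trip:

    package main

    import "fmt"

    func split(x int64) (hi int32, lo uint32) { return int32(x >> 32), uint32(x) }
    func join(hi int32, lo uint32) int64      { return int64(hi)<<32 | int64(lo) }

    func main() {
        x := int64(-5)
        hi, lo := split(x)
        fmt.Println(hi, lo, join(hi, lo) == x) // -1 4294967291 true
    }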
@@ -12,31 +12,31 @@
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() ->
(Int64Make
- (Load <types.Int32> (OffPtr <types.Int32Ptr> [4] ptr) mem)
- (Load <types.UInt32> ptr mem))
+ (Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem)
+ (Load <typ.UInt32> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() ->
(Int64Make
- (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem)
- (Load <types.UInt32> ptr mem))
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem)
+ (Load <typ.UInt32> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() ->
(Int64Make
- (Load <types.Int32> ptr mem)
- (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
+ (Load <typ.Int32> ptr mem)
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() ->
(Int64Make
- (Load <types.UInt32> ptr mem)
- (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
+ (Load <typ.UInt32> ptr mem)
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
-(Store {t} dst (Int64Make hi lo) mem) && t.(Type).Size() == 8 && !config.BigEndian ->
+(Store {t} dst (Int64Make hi lo) mem) && t.(*types.Type).Size() == 8 && !config.BigEndian ->
(Store {hi.Type}
(OffPtr <hi.Type.PtrTo()> [4] dst)
hi
(Store {lo.Type} dst lo mem))
-(Store {t} dst (Int64Make hi lo) mem) && t.(Type).Size() == 8 && config.BigEndian ->
+(Store {t} dst (Int64Make hi lo) mem) && t.(*types.Type).Size() == 8 && config.BigEndian ->
(Store {lo.Type}
(OffPtr <lo.Type.PtrTo()> [4] dst)
lo
@@ -44,94 +44,94 @@
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() ->
(Int64Make
- (Arg <types.Int32> {n} [off+4])
- (Arg <types.UInt32> {n} [off]))
+ (Arg <typ.Int32> {n} [off+4])
+ (Arg <typ.UInt32> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() ->
(Int64Make
- (Arg <types.UInt32> {n} [off+4])
- (Arg <types.UInt32> {n} [off]))
+ (Arg <typ.UInt32> {n} [off+4])
+ (Arg <typ.UInt32> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() ->
(Int64Make
- (Arg <types.Int32> {n} [off])
- (Arg <types.UInt32> {n} [off+4]))
+ (Arg <typ.Int32> {n} [off])
+ (Arg <typ.UInt32> {n} [off+4]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() ->
(Int64Make
- (Arg <types.UInt32> {n} [off])
- (Arg <types.UInt32> {n} [off+4]))
+ (Arg <typ.UInt32> {n} [off])
+ (Arg <typ.UInt32> {n} [off+4]))
(Add64 x y) ->
(Int64Make
- (Add32withcarry <types.Int32>
+ (Add32withcarry <typ.Int32>
(Int64Hi x)
(Int64Hi y)
- (Select1 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
- (Select0 <types.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
+ (Select1 <types.TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <typ.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
(Sub64 x y) ->
(Int64Make
- (Sub32withcarry <types.Int32>
+ (Sub32withcarry <typ.Int32>
(Int64Hi x)
(Int64Hi y)
- (Select1 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
- (Select0 <types.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ (Select1 <types.TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <typ.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
(Mul64 x y) ->
(Int64Make
- (Add32 <types.UInt32>
- (Mul32 <types.UInt32> (Int64Lo x) (Int64Hi y))
- (Add32 <types.UInt32>
- (Mul32 <types.UInt32> (Int64Hi x) (Int64Lo y))
- (Select0 <types.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
- (Select1 <types.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32> (Int64Lo x) (Int64Hi y))
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32> (Int64Hi x) (Int64Lo y))
+ (Select0 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
+ (Select1 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
(And64 x y) ->
(Int64Make
- (And32 <types.UInt32> (Int64Hi x) (Int64Hi y))
- (And32 <types.UInt32> (Int64Lo x) (Int64Lo y)))
+ (And32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (And32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
(Or64 x y) ->
(Int64Make
- (Or32 <types.UInt32> (Int64Hi x) (Int64Hi y))
- (Or32 <types.UInt32> (Int64Lo x) (Int64Lo y)))
+ (Or32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (Or32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
(Xor64 x y) ->
(Int64Make
- (Xor32 <types.UInt32> (Int64Hi x) (Int64Hi y))
- (Xor32 <types.UInt32> (Int64Lo x) (Int64Lo y)))
+ (Xor32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (Xor32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
(Neg64 <t> x) -> (Sub64 (Const64 <t> [0]) x)
(Com64 x) ->
(Int64Make
- (Com32 <types.UInt32> (Int64Hi x))
- (Com32 <types.UInt32> (Int64Lo x)))
+ (Com32 <typ.UInt32> (Int64Hi x))
+ (Com32 <typ.UInt32> (Int64Lo x)))
(Ctz64 x) ->
- (Add32 <types.UInt32>
- (Ctz32 <types.UInt32> (Int64Lo x))
- (And32 <types.UInt32>
- (Com32 <types.UInt32> (Zeromask (Int64Lo x)))
- (Ctz32 <types.UInt32> (Int64Hi x))))
+ (Add32 <typ.UInt32>
+ (Ctz32 <typ.UInt32> (Int64Lo x))
+ (And32 <typ.UInt32>
+ (Com32 <typ.UInt32> (Zeromask (Int64Lo x)))
+ (Ctz32 <typ.UInt32> (Int64Hi x))))
(BitLen64 x) ->
- (Add32 <types.Int>
- (BitLen32 <types.Int> (Int64Hi x))
- (BitLen32 <types.Int>
- (Or32 <types.UInt32>
+ (Add32 <typ.Int>
+ (BitLen32 <typ.Int> (Int64Hi x))
+ (BitLen32 <typ.Int>
+ (Or32 <typ.UInt32>
(Int64Lo x)
(Zeromask (Int64Hi x)))))
(Bswap64 x) ->
(Int64Make
- (Bswap32 <types.UInt32> (Int64Lo x))
- (Bswap32 <types.UInt32> (Int64Hi x)))
+ (Bswap32 <typ.UInt32> (Int64Lo x))
+ (Bswap32 <typ.UInt32> (Int64Hi x)))
(SignExt32to64 x) -> (Int64Make (Signmask x) x)
(SignExt16to64 x) -> (SignExt32to64 (SignExt16to32 x))
(SignExt8to64 x) -> (SignExt32to64 (SignExt8to32 x))
-(ZeroExt32to64 x) -> (Int64Make (Const32 <types.UInt32> [0]) x)
+(ZeroExt32to64 x) -> (Int64Make (Const32 <typ.UInt32> [0]) x)
(ZeroExt16to64 x) -> (ZeroExt32to64 (ZeroExt16to32 x))
(ZeroExt8to64 x) -> (ZeroExt32to64 (ZeroExt8to32 x))
@@ -170,160 +170,160 @@
// turn x64 non-constant shifts to x32 shifts
// if high 32-bit of the shift is nonzero, make a huge shift
(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Lsh64x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh64x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh64Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Lsh32x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh32x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh32Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Lsh16x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh16x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh16Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Lsh8x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh8x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh8Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
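The Zeromask trick in these rules: Zeromask(hi) is 0 when hi == 0 and all ones otherwise, so ORing it into the low word leaves genuine small counts alone and saturates to a huge count whenever the high word is nonzero. A sketch (Zeromask itself is an SSA op; this is just its arithmetic):

    func shiftAmount32(hi, lo uint32) uint32 {
        var mask uint32
        if hi != 0 {
            mask = 0xFFFFFFFF // Zeromask(hi)
        }
        return mask | lo // >= 32 whenever the original 64-bit count was >= 2^32
    }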
// 64x left shift
// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result in 0
// result.lo = lo<<s
(Lsh64x32 (Int64Make hi lo) s) ->
(Int64Make
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Lsh32x32 <types.UInt32> hi s)
- (Rsh32Ux32 <types.UInt32>
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x32 <typ.UInt32> hi s)
+ (Rsh32Ux32 <typ.UInt32>
lo
- (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s)))
- (Lsh32x32 <types.UInt32>
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (Lsh32x32 <typ.UInt32>
lo
- (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32]))))
- (Lsh32x32 <types.UInt32> lo s))
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))))
+ (Lsh32x32 <typ.UInt32> lo s))
(Lsh64x16 (Int64Make hi lo) s) ->
(Int64Make
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Lsh32x16 <types.UInt32> hi s)
- (Rsh32Ux16 <types.UInt32>
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x16 <typ.UInt32> hi s)
+ (Rsh32Ux16 <typ.UInt32>
lo
- (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s)))
- (Lsh32x16 <types.UInt32>
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (Lsh32x16 <typ.UInt32>
lo
- (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32]))))
- (Lsh32x16 <types.UInt32> lo s))
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))))
+ (Lsh32x16 <typ.UInt32> lo s))
(Lsh64x8 (Int64Make hi lo) s) ->
(Int64Make
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Lsh32x8 <types.UInt32> hi s)
- (Rsh32Ux8 <types.UInt32>
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x8 <typ.UInt32> hi s)
+ (Rsh32Ux8 <typ.UInt32>
lo
- (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s)))
- (Lsh32x8 <types.UInt32>
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (Lsh32x8 <typ.UInt32>
lo
- (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32]))))
- (Lsh32x8 <types.UInt32> lo s))
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))))
+ (Lsh32x8 <typ.UInt32> lo s))
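The left-shift formula transcribes directly into Go, because Go (like the generic SSA shift ops used here) defines oversized unsigned shift counts to produce 0, so each of the three terms is live for a disjoint range of s:

    // lsh64 computes (hi,lo) << s using only 32-bit shifts.
    func lsh64(hi, lo uint32, s uint) (rhi, rlo uint32) {
        rhi = hi<<s | lo>>(32-s) | lo<<(s-32) // terms whose count is >= 32 vanish
        rlo = lo << s
        return
    }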
// 64x unsigned right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result in 0
(Rsh64Ux32 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32Ux32 <types.UInt32> hi s)
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Rsh32Ux32 <types.UInt32> lo s)
- (Lsh32x32 <types.UInt32>
+ (Rsh32Ux32 <typ.UInt32> hi s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux32 <typ.UInt32> lo s)
+ (Lsh32x32 <typ.UInt32>
hi
- (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s)))
- (Rsh32Ux32 <types.UInt32>
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (Rsh32Ux32 <typ.UInt32>
hi
- (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32])))))
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
(Rsh64Ux16 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32Ux16 <types.UInt32> hi s)
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Rsh32Ux16 <types.UInt32> lo s)
- (Lsh32x16 <types.UInt32>
+ (Rsh32Ux16 <typ.UInt32> hi s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux16 <typ.UInt32> lo s)
+ (Lsh32x16 <typ.UInt32>
hi
- (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s)))
- (Rsh32Ux16 <types.UInt32>
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (Rsh32Ux16 <typ.UInt32>
hi
- (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32])))))
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
(Rsh64Ux8 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32Ux8 <types.UInt32> hi s)
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Rsh32Ux8 <types.UInt32> lo s)
- (Lsh32x8 <types.UInt32>
+ (Rsh32Ux8 <typ.UInt32> hi s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux8 <typ.UInt32> lo s)
+ (Lsh32x8 <typ.UInt32>
hi
- (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s)))
- (Rsh32Ux8 <types.UInt32>
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (Rsh32Ux8 <typ.UInt32>
hi
- (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32])))))
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
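Same idea for the unsigned right shift; the oversized-count-gives-zero semantics again make every term self-gating:

    // rsh64u computes the unsigned right shift (hi,lo) >> s.
    func rsh64u(hi, lo uint32, s uint) (rhi, rlo uint32) {
        rhi = hi >> s
        rlo = lo>>s | hi<<(32-s) | hi>>(s-32)
        return
    }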
// 64x signed right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result in 0/-1
(Rsh64x32 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32x32 <types.UInt32> hi s)
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Rsh32Ux32 <types.UInt32> lo s)
- (Lsh32x32 <types.UInt32>
+ (Rsh32x32 <typ.UInt32> hi s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux32 <typ.UInt32> lo s)
+ (Lsh32x32 <typ.UInt32>
hi
- (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s)))
- (And32 <types.UInt32>
- (Rsh32x32 <types.UInt32>
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x32 <typ.UInt32>
hi
- (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32])))
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))
(Zeromask
- (Rsh32Ux32 <types.UInt32> s (Const32 <types.UInt32> [5]))))))
+ (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
(Rsh64x16 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32x16 <types.UInt32> hi s)
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Rsh32Ux16 <types.UInt32> lo s)
- (Lsh32x16 <types.UInt32>
+ (Rsh32x16 <typ.UInt32> hi s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux16 <typ.UInt32> lo s)
+ (Lsh32x16 <typ.UInt32>
hi
- (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s)))
- (And32 <types.UInt32>
- (Rsh32x16 <types.UInt32>
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x16 <typ.UInt32>
hi
- (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32])))
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))
(Zeromask
(ZeroExt16to32
- (Rsh16Ux32 <types.UInt16> s (Const32 <types.UInt32> [5])))))))
+ (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
(Rsh64x8 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32x8 <types.UInt32> hi s)
- (Or32 <types.UInt32>
- (Or32 <types.UInt32>
- (Rsh32Ux8 <types.UInt32> lo s)
- (Lsh32x8 <types.UInt32>
+ (Rsh32x8 <typ.UInt32> hi s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux8 <typ.UInt32> lo s)
+ (Lsh32x8 <typ.UInt32>
hi
- (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s)))
- (And32 <types.UInt32>
- (Rsh32x8 <types.UInt32>
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x8 <typ.UInt32>
hi
- (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32])))
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))
(Zeromask
(ZeroExt8to32
- (Rsh8Ux32 <types.UInt8> s (Const32 <types.UInt32> [5])))))))
+ (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
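The signed variant needs the extra And32/Zeromask because an arithmetic shift by an oversized count yields sign bits (0 or -1), not 0; the mask keeps that term only when s >= 32. In Go:

    // rsh64s computes the signed right shift (hi,lo) >> s.
    func rsh64s(hi int32, lo uint32, s uint) (rhi int32, rlo uint32) {
        rhi = hi >> s // arithmetic: oversized counts smear the sign bit
        var mask uint32
        if s>>5 != 0 { // Zeromask(s>>5): nonzero exactly when s >= 32
            mask = 0xFFFFFFFF
        }
        rlo = lo>>s | uint32(hi)<<(32-s) | uint32(hi>>(s-32))&mask
        return
    }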
// 64xConst32 shifts
// we probably do not need them -- lateopt may take care of them just fine
@@ -333,48 +333,48 @@
//
//(Lsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
-// (Lsh32x32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [c-32]))
-// (Const32 <types.UInt32> [0]))
+// (Lsh32x32 <typ.UInt32> (Int64Lo x) (Const32 <typ.UInt32> [c-32]))
+// (Const32 <typ.UInt32> [0]))
//(Rsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
// (Signmask (Int64Hi x))
-// (Rsh32x32 <types.Int32> (Int64Hi x) (Const32 <types.UInt32> [c-32])))
+// (Rsh32x32 <typ.Int32> (Int64Hi x) (Const32 <typ.UInt32> [c-32])))
//(Rsh64Ux32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
-// (Const32 <types.UInt32> [0])
-// (Rsh32Ux32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [c-32])))
+// (Const32 <typ.UInt32> [0])
+// (Rsh32Ux32 <typ.UInt32> (Int64Hi x) (Const32 <typ.UInt32> [c-32])))
//
-//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <types.UInt32> [0]))
+//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <typ.UInt32> [0]))
//(Rsh64x32 x (Const32 [32])) -> (Int64Make (Signmask (Int64Hi x)) (Int64Hi x))
-//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <types.UInt32> [0]) (Int64Hi x))
+//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <typ.UInt32> [0]) (Int64Hi x))
//
//(Lsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
-// (Or32 <types.UInt32>
-// (Lsh32x32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [c]))
-// (Rsh32Ux32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [32-c])))
-// (Lsh32x32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [c])))
+// (Or32 <typ.UInt32>
+// (Lsh32x32 <typ.UInt32> (Int64Hi x) (Const32 <typ.UInt32> [c]))
+// (Rsh32Ux32 <typ.UInt32> (Int64Lo x) (Const32 <typ.UInt32> [32-c])))
+// (Lsh32x32 <typ.UInt32> (Int64Lo x) (Const32 <typ.UInt32> [c])))
//(Rsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
-// (Rsh32x32 <types.Int32> (Int64Hi x) (Const32 <types.UInt32> [c]))
-// (Or32 <types.UInt32>
-// (Rsh32Ux32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [c]))
-// (Lsh32x32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [32-c]))))
+// (Rsh32x32 <typ.Int32> (Int64Hi x) (Const32 <typ.UInt32> [c]))
+// (Or32 <typ.UInt32>
+// (Rsh32Ux32 <typ.UInt32> (Int64Lo x) (Const32 <typ.UInt32> [c]))
+// (Lsh32x32 <typ.UInt32> (Int64Hi x) (Const32 <typ.UInt32> [32-c]))))
//(Rsh64Ux32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
-// (Rsh32Ux32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [c]))
-// (Or32 <types.UInt32>
-// (Rsh32Ux32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [c]))
-// (Lsh32x32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [32-c]))))
+// (Rsh32Ux32 <typ.UInt32> (Int64Hi x) (Const32 <typ.UInt32> [c]))
+// (Or32 <typ.UInt32>
+// (Rsh32Ux32 <typ.UInt32> (Int64Lo x) (Const32 <typ.UInt32> [c]))
+// (Lsh32x32 <typ.UInt32> (Int64Hi x) (Const32 <typ.UInt32> [32-c]))))
//
//(Lsh64x32 x (Const32 [0])) -> x
//(Rsh64x32 x (Const32 [0])) -> x
//(Rsh64Ux32 x (Const32 [0])) -> x
(Const64 <t> [c]) && t.IsSigned() ->
- (Int64Make (Const32 <types.Int32> [c>>32]) (Const32 <types.UInt32> [int64(int32(c))]))
+ (Int64Make (Const32 <typ.Int32> [c>>32]) (Const32 <typ.UInt32> [int64(int32(c))]))
(Const64 <t> [c]) && !t.IsSigned() ->
- (Int64Make (Const32 <types.UInt32> [c>>32]) (Const32 <types.UInt32> [int64(int32(c))]))
+ (Int64Make (Const32 <typ.UInt32> [c>>32]) (Const32 <typ.UInt32> [int64(int32(c))]))
(Eq64 x y) ->
(AndB
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index d3a871731a..322de4de11 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -155,14 +155,14 @@
(Mul64 (Const64 [-1]) x) -> (Neg64 x)
// Convert multiplication by a power of two to a shift.
-(Mul8 <t> n (Const8 [c])) && isPowerOfTwo(c) -> (Lsh8x64 <t> n (Const64 <types.UInt64> [log2(c)]))
-(Mul16 <t> n (Const16 [c])) && isPowerOfTwo(c) -> (Lsh16x64 <t> n (Const64 <types.UInt64> [log2(c)]))
-(Mul32 <t> n (Const32 [c])) && isPowerOfTwo(c) -> (Lsh32x64 <t> n (Const64 <types.UInt64> [log2(c)]))
-(Mul64 <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Lsh64x64 <t> n (Const64 <types.UInt64> [log2(c)]))
-(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg8 (Lsh8x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
-(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg16 (Lsh16x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
-(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg32 (Lsh32x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
-(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg64 (Lsh64x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+(Mul8 <t> n (Const8 [c])) && isPowerOfTwo(c) -> (Lsh8x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
+(Mul16 <t> n (Const16 [c])) && isPowerOfTwo(c) -> (Lsh16x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
+(Mul32 <t> n (Const32 [c])) && isPowerOfTwo(c) -> (Lsh32x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
+(Mul64 <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Lsh64x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
+(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
+(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
+(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
+(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
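The negative case composes a shift with a negation; a one-line sanity check with illustrative values:

    package main

    import "fmt"

    func main() {
        n := int32(7)
        fmt.Println(n*8 == n<<3, n*-8 == -(n << 3)) // true true
    }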
(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 -> (Const8 [int64(int8(c % d))])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(c % d))])
@@ -446,46 +446,46 @@
// ((x >> c1) << c2) >> c3
(Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Rsh64Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ -> (Rsh64Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
(Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Rsh32Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ -> (Rsh32Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
(Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Rsh16Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ -> (Rsh16Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
(Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Rsh8Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ -> (Rsh8Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
// ((x << c1) >> c2) << c3
(Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Lsh64x64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ -> (Lsh64x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
(Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Lsh32x64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ -> (Lsh32x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
(Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Lsh16x64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ -> (Lsh16x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
(Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Lsh8x64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ -> (Lsh8x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
// replace shifts with zero extensions
-(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (ZeroExt8to16 (Trunc16to8 <types.UInt8> x))
-(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (ZeroExt8to32 (Trunc32to8 <types.UInt8> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (ZeroExt8to64 (Trunc64to8 <types.UInt8> x))
-(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (ZeroExt16to32 (Trunc32to16 <types.UInt16> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (ZeroExt16to64 (Trunc64to16 <types.UInt16> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (ZeroExt32to64 (Trunc64to32 <types.UInt32> x))
+(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))
// replace shifts with sign extensions
-(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (SignExt8to16 (Trunc16to8 <types.Int8> x))
-(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (SignExt8to32 (Trunc32to8 <types.Int8> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (SignExt8to64 (Trunc64to8 <types.Int8> x))
-(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (SignExt16to32 (Trunc32to16 <types.Int16> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (SignExt16to64 (Trunc64to16 <types.Int16> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (SignExt32to64 (Trunc64to32 <types.Int32> x))
+(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (SignExt8to16 (Trunc16to8 <typ.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (SignExt8to32 (Trunc32to8 <typ.Int8> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (SignExt8to64 (Trunc64to8 <typ.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (SignExt16to32 (Trunc32to16 <typ.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (SignExt16to64 (Trunc64to16 <typ.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (SignExt32to64 (Trunc64to32 <typ.Int32> x))
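Both families above are the standard shift-up/shift-down extension idiom; a check in plain Go for the 16-bit cases:

    package main

    import "fmt"

    func main() {
        x := uint16(0xABCD)
        fmt.Println(x<<8>>8 == uint16(uint8(x))) // true: zero extension of the low byte
        y := int16(-0x1234)
        fmt.Println(y<<8>>8 == int16(int8(y))) // true: sign extension of the low byte
    }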
// constant comparisons
(Eq64 (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(c == d)])
@@ -689,16 +689,16 @@
(NeqSlice x y) -> (NeqPtr (SlicePtr x) (SlicePtr y))
// Load of store of same address, with compatibly typed value and same size
-(Load <t1> p1 (Store {t2} p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.Size() == t2.(Type).Size() -> x
+(Load <t1> p1 (Store {t2} p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.(*types.Type).Size() -> x
// Collapse OffPtr
(OffPtr (OffPtr p [b]) [a]) -> (OffPtr p [a+b])
-(OffPtr p [0]) && v.Type.Compare(p.Type) == CMPeq -> p
+(OffPtr p [0]) && v.Type.Compare(p.Type) == types.CMPeq -> p
// indexing operations
// Note: bounds check has already been done
-(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <types.Int> idx (Const32 <types.Int> [t.ElemType().Size()])))
-(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <types.Int> idx (Const64 <types.Int> [t.ElemType().Size()])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [t.ElemType().Size()])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.ElemType().Size()])))
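PtrIndex is just base + idx*elemSize, with the multiply sized to the target's pointer width; the unsafe equivalent (bounds already checked, per the note above):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        a := [4]int32{10, 20, 30, 40}
        p := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&a[0])) + 2*unsafe.Sizeof(a[0])))
        fmt.Println(*p) // 30
    }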
// struct operations
(StructSelect (StructMake1 x)) -> x
@@ -775,10 +775,10 @@
(StructSelect [0] x:(IData _)) -> x
// un-SSAable values use mem->mem copies
-(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t.(Type)) ->
- (Move {t} [t.(Type).Size()] dst src mem)
-(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t.(Type)) ->
- (Move {t} [t.(Type).Size()] dst src (VarDef {x} mem))
+(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t.(*types.Type)) ->
+ (Move {t} [t.(*types.Type).Size()] dst src mem)
+(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t.(*types.Type)) ->
+ (Move {t} [t.(*types.Type).Size()] dst src (VarDef {x} mem))
// array ops
(ArraySelect (ArrayMake1 x)) -> x
@@ -805,19 +805,19 @@
(StringPtr (StringMake (Const64 <t> [c]) _)) -> (Const64 <t> [c])
(StringLen (StringMake _ (Const64 <t> [c]))) -> (Const64 <t> [c])
(ConstString {s}) && config.PtrSize == 4 && s.(string) == "" ->
- (StringMake (ConstNil) (Const32 <types.Int> [0]))
+ (StringMake (ConstNil) (Const32 <typ.Int> [0]))
(ConstString {s}) && config.PtrSize == 8 && s.(string) == "" ->
- (StringMake (ConstNil) (Const64 <types.Int> [0]))
+ (StringMake (ConstNil) (Const64 <typ.Int> [0]))
(ConstString {s}) && config.PtrSize == 4 && s.(string) != "" ->
(StringMake
- (Addr <types.BytePtr> {fe.StringData(s.(string))}
+ (Addr <typ.BytePtr> {fe.StringData(s.(string))}
(SB))
- (Const32 <types.Int> [int64(len(s.(string)))]))
+ (Const32 <typ.Int> [int64(len(s.(string)))]))
(ConstString {s}) && config.PtrSize == 8 && s.(string) != "" ->
(StringMake
- (Addr <types.BytePtr> {fe.StringData(s.(string))}
+ (Addr <typ.BytePtr> {fe.StringData(s.(string))}
(SB))
- (Const64 <types.Int> [int64(len(s.(string)))]))
+ (Const64 <typ.Int> [int64(len(s.(string)))]))
// slice ops
// Only a few slice rules are provided here. See dec.rules for
@@ -833,19 +833,19 @@
(ConstSlice) && config.PtrSize == 4 ->
(SliceMake
(ConstNil <v.Type.ElemType().PtrTo()>)
- (Const32 <types.Int> [0])
- (Const32 <types.Int> [0]))
+ (Const32 <typ.Int> [0])
+ (Const32 <typ.Int> [0]))
(ConstSlice) && config.PtrSize == 8 ->
(SliceMake
(ConstNil <v.Type.ElemType().PtrTo()>)
- (Const64 <types.Int> [0])
- (Const64 <types.Int> [0]))
+ (Const64 <typ.Int> [0])
+ (Const64 <typ.Int> [0]))
// interface ops
(ConstInterface) ->
(IMake
- (ConstNil <types.BytePtr>)
- (ConstNil <types.BytePtr>))
+ (ConstNil <typ.BytePtr>)
+ (ConstNil <typ.BytePtr>))
(NilCheck (GetG mem) mem) -> mem
@@ -860,29 +860,29 @@
// Decompose compound argument values
(Arg {n} [off]) && v.Type.IsString() ->
(StringMake
- (Arg <types.BytePtr> {n} [off])
- (Arg <types.Int> {n} [off+config.PtrSize]))
+ (Arg <typ.BytePtr> {n} [off])
+ (Arg <typ.Int> {n} [off+config.PtrSize]))
(Arg {n} [off]) && v.Type.IsSlice() ->
(SliceMake
(Arg <v.Type.ElemType().PtrTo()> {n} [off])
- (Arg <types.Int> {n} [off+config.PtrSize])
- (Arg <types.Int> {n} [off+2*config.PtrSize]))
+ (Arg <typ.Int> {n} [off+config.PtrSize])
+ (Arg <typ.Int> {n} [off+2*config.PtrSize]))
(Arg {n} [off]) && v.Type.IsInterface() ->
(IMake
- (Arg <types.BytePtr> {n} [off])
- (Arg <types.BytePtr> {n} [off+config.PtrSize]))
+ (Arg <typ.BytePtr> {n} [off])
+ (Arg <typ.BytePtr> {n} [off+config.PtrSize]))
(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 ->
(ComplexMake
- (Arg <types.Float64> {n} [off])
- (Arg <types.Float64> {n} [off+8]))
+ (Arg <typ.Float64> {n} [off])
+ (Arg <typ.Float64> {n} [off+8]))
(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 ->
(ComplexMake
- (Arg <types.Float32> {n} [off])
- (Arg <types.Float32> {n} [off+4]))
+ (Arg <typ.Float32> {n} [off])
+ (Arg <typ.Float32> {n} [off+4]))
(Arg <t>) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) ->
(StructMake0)
@@ -914,125 +914,125 @@
// See ../magic.go for a detailed description of these algorithms.
// Unsigned divide by power of 2. Strength reduce to a shift.
-(Div8u n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 <types.UInt64> [log2(c&0xff)]))
-(Div16u n (Const16 [c])) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 <types.UInt64> [log2(c&0xffff)]))
-(Div32u n (Const32 [c])) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 <types.UInt64> [log2(c&0xffffffff)]))
-(Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <types.UInt64> [log2(c)]))
+(Div8u n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 <typ.UInt64> [log2(c&0xff)]))
+(Div16u n (Const16 [c])) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 <typ.UInt64> [log2(c&0xffff)]))
+(Div32u n (Const32 [c])) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 <typ.UInt64> [log2(c&0xffffffff)]))
+(Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <typ.UInt64> [log2(c)]))
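The power-of-two case is the plain shift identity; a spot check:

    package main

    import "fmt"

    func main() {
        n := uint32(1000)
        fmt.Println(n/8 == n>>3) // true: unsigned n / 2^k == n >> k
    }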
// Unsigned divide, not a power of 2. Strength reduce to a multiply.
// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
(Div8u x (Const8 [c])) && umagicOK(8, c) ->
(Trunc32to8
- (Rsh32Ux64 <types.UInt32>
- (Mul32 <types.UInt32>
- (Const32 <types.UInt32> [int64(1<<8+umagic(8,c).m)])
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(1<<8+umagic(8,c).m)])
(ZeroExt8to32 x))
- (Const64 <types.UInt64> [8+umagic(8,c).s])))
+ (Const64 <typ.UInt64> [8+umagic(8,c).s])))
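What makes this work: some M and k satisfy x/c == (x*M)>>k for every x in the operand's range. The compiler derives them analytically in magic.go; a brute-force search (illustration only, with c = 3) finds the same kind of pair:

    package main

    import "fmt"

    func main() {
        const c = 3
        for k := uint(8); k <= 16; k++ {
            M := (uint32(1)<<k + c - 1) / c // ceil(2^k / c)
            ok := true
            for x := uint32(0); x < 256; x++ {
                if x*M>>k != x/c {
                    ok = false
                    break
                }
            }
            if ok {
                fmt.Printf("x/%d == (x*%d)>>%d for all uint8 x\n", c, M, k)
                return
            }
        }
    }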
// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 8 ->
(Trunc64to16
- (Rsh64Ux64 <types.UInt64>
- (Mul64 <types.UInt64>
- (Const64 <types.UInt64> [int64(1<<16+umagic(16,c).m)])
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<16+umagic(16,c).m)])
(ZeroExt16to64 x))
- (Const64 <types.UInt64> [16+umagic(16,c).s])))
+ (Const64 <typ.UInt64> [16+umagic(16,c).s])))
// For 16-bit divides on 32-bit machines
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0 ->
(Trunc32to16
- (Rsh32Ux64 <types.UInt32>
- (Mul32 <types.UInt32>
- (Const32 <types.UInt32> [int64(1<<15+umagic(16,c).m/2)])
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(1<<15+umagic(16,c).m/2)])
(ZeroExt16to32 x))
- (Const64 <types.UInt64> [16+umagic(16,c).s-1])))
+ (Const64 <typ.UInt64> [16+umagic(16,c).s-1])))
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && c&1 == 0 ->
(Trunc32to16
- (Rsh32Ux64 <types.UInt32>
- (Mul32 <types.UInt32>
- (Const32 <types.UInt32> [int64(1<<15+(umagic(16,c).m+1)/2)])
- (Rsh32Ux64 <types.UInt32> (ZeroExt16to32 x) (Const64 <types.UInt64> [1])))
- (Const64 <types.UInt64> [16+umagic(16,c).s-2])))
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(1<<15+(umagic(16,c).m+1)/2)])
+ (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [16+umagic(16,c).s-2])))
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 ->
(Trunc32to16
- (Rsh32Ux64 <types.UInt32>
+ (Rsh32Ux64 <typ.UInt32>
(Avg32u
- (Lsh32x64 <types.UInt32> (ZeroExt16to32 x) (Const64 <types.UInt64> [16]))
- (Mul32 <types.UInt32>
- (Const32 <types.UInt32> [int64(umagic(16,c).m)])
+ (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16]))
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(umagic(16,c).m)])
(ZeroExt16to32 x)))
- (Const64 <types.UInt64> [16+umagic(16,c).s-1])))
+ (Const64 <typ.UInt64> [16+umagic(16,c).s-1])))
// For 32-bit divides on 32-bit machines
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0 ->
- (Rsh32Ux64 <types.UInt32>
- (Hmul32u <types.UInt32>
- (Const32 <types.UInt32> [int64(int32(1<<31+umagic(32,c).m/2))])
+ (Rsh32Ux64 <typ.UInt32>
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(int32(1<<31+umagic(32,c).m/2))])
x)
- (Const64 <types.UInt64> [umagic(32,c).s-1]))
+ (Const64 <typ.UInt64> [umagic(32,c).s-1]))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && c&1 == 0 ->
- (Rsh32Ux64 <types.UInt32>
- (Hmul32u <types.UInt32>
- (Const32 <types.UInt32> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
- (Rsh32Ux64 <types.UInt32> x (Const64 <types.UInt64> [1])))
- (Const64 <types.UInt64> [umagic(32,c).s-2]))
+ (Rsh32Ux64 <typ.UInt32>
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
+ (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [umagic(32,c).s-2]))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 ->
- (Rsh32Ux64 <types.UInt32>
+ (Rsh32Ux64 <typ.UInt32>
(Avg32u
x
- (Hmul32u <types.UInt32>
- (Const32 <types.UInt32> [int64(int32(umagic(32,c).m))])
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(int32(umagic(32,c).m))])
x))
- (Const64 <types.UInt64> [umagic(32,c).s-1]))
+ (Const64 <typ.UInt64> [umagic(32,c).s-1]))
// For 32-bit divides on 64-bit machines
// We'll use a regular (non-hi) multiply for this case.
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0 ->
(Trunc64to32
- (Rsh64Ux64 <types.UInt64>
- (Mul64 <types.UInt64>
- (Const64 <types.UInt64> [int64(1<<31+umagic(32,c).m/2)])
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<31+umagic(32,c).m/2)])
(ZeroExt32to64 x))
- (Const64 <types.UInt64> [32+umagic(32,c).s-1])))
+ (Const64 <typ.UInt64> [32+umagic(32,c).s-1])))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && c&1 == 0 ->
(Trunc64to32
- (Rsh64Ux64 <types.UInt64>
- (Mul64 <types.UInt64>
- (Const64 <types.UInt64> [int64(1<<31+(umagic(32,c).m+1)/2)])
- (Rsh64Ux64 <types.UInt64> (ZeroExt32to64 x) (Const64 <types.UInt64> [1])))
- (Const64 <types.UInt64> [32+umagic(32,c).s-2])))
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<31+(umagic(32,c).m+1)/2)])
+ (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [32+umagic(32,c).s-2])))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 ->
(Trunc64to32
- (Rsh64Ux64 <types.UInt64>
+ (Rsh64Ux64 <typ.UInt64>
(Avg64u
- (Lsh64x64 <types.UInt64> (ZeroExt32to64 x) (Const64 <types.UInt64> [32]))
- (Mul64 <types.UInt64>
- (Const64 <types.UInt32> [int64(umagic(32,c).m)])
+ (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32]))
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt32> [int64(umagic(32,c).m)])
(ZeroExt32to64 x)))
- (Const64 <types.UInt64> [32+umagic(32,c).s-1])))
+ (Const64 <typ.UInt64> [32+umagic(32,c).s-1])))
// For 64-bit divides on 64-bit machines
// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0 ->
- (Rsh64Ux64 <types.UInt64>
- (Hmul64u <types.UInt64>
- (Const64 <types.UInt64> [int64(1<<63+umagic(64,c).m/2)])
+ (Rsh64Ux64 <typ.UInt64>
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<63+umagic(64,c).m/2)])
x)
- (Const64 <types.UInt64> [umagic(64,c).s-1]))
+ (Const64 <typ.UInt64> [umagic(64,c).s-1]))
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && c&1 == 0 ->
- (Rsh64Ux64 <types.UInt64>
- (Hmul64u <types.UInt64>
- (Const64 <types.UInt64> [int64(1<<63+(umagic(64,c).m+1)/2)])
- (Rsh64Ux64 <types.UInt64> x (Const64 <types.UInt64> [1])))
- (Const64 <types.UInt64> [umagic(64,c).s-2]))
+ (Rsh64Ux64 <typ.UInt64>
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<63+(umagic(64,c).m+1)/2)])
+ (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [umagic(64,c).s-2]))
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 ->
- (Rsh64Ux64 <types.UInt64>
+ (Rsh64Ux64 <typ.UInt64>
(Avg64u
x
- (Hmul64u <types.UInt64>
- (Const64 <types.UInt64> [int64(umagic(64,c).m)])
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(umagic(64,c).m)])
x))
- (Const64 <types.UInt64> [umagic(64,c).s-1]))
+ (Const64 <typ.UInt64> [umagic(64,c).s-1]))
// Signed divide by a negative constant. Rewrite to divide by a positive constant.
(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 -> (Neg8 (Div8 <t> n (Const8 <t> [-c])))
@@ -1043,10 +1043,10 @@
// Dividing by the most-negative number. Result is always 0 except
// if the input is also the most-negative number.
// We can detect that using the sign bit of x & -x.
-(Div8 <t> x (Const8 [-1<<7 ])) -> (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <types.UInt64> [7 ]))
-(Div16 <t> x (Const16 [-1<<15])) -> (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <types.UInt64> [15]))
-(Div32 <t> x (Const32 [-1<<31])) -> (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <types.UInt64> [31]))
-(Div64 <t> x (Const64 [-1<<63])) -> (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <types.UInt64> [63]))
+(Div8 <t> x (Const8 [-1<<7 ])) -> (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
+(Div16 <t> x (Const16 [-1<<15])) -> (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
+(Div32 <t> x (Const32 [-1<<31])) -> (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
+(Div64 <t> x (Const64 [-1<<63])) -> (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
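The 8-bit instance, checked exhaustively: the sign bit of x & -x is set only when x is the most-negative value, so the unsigned shift by 7 yields the correct quotient (1 there, 0 everywhere else):

    package main

    import "fmt"

    func main() {
        for i := -128; i < 128; i++ {
            x := int8(i)
            got := int8(uint8(x&-x) >> 7)
            want := int8(0)
            if x == -128 {
                want = 1 // the only input with x / -128 != 0
            }
            if got != want {
                fmt.Println("mismatch at", x)
                return
            }
        }
        fmt.Println("ok")
    }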
// Signed divide by power of 2.
// n / c = n >> log(c) if n >= 0
@@ -1054,96 +1054,96 @@
// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned).
(Div8 <t> n (Const8 [c])) && isPowerOfTwo(c) ->
(Rsh8x64
- (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <types.UInt64> [ 7])) (Const64 <types.UInt64> [ 8-log2(c)])))
- (Const64 <types.UInt64> [log2(c)]))
+ (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [ 8-log2(c)])))
+ (Const64 <typ.UInt64> [log2(c)]))
(Div16 <t> n (Const16 [c])) && isPowerOfTwo(c) ->
(Rsh16x64
- (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <types.UInt64> [15])) (Const64 <types.UInt64> [16-log2(c)])))
- (Const64 <types.UInt64> [log2(c)]))
+ (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [16-log2(c)])))
+ (Const64 <typ.UInt64> [log2(c)]))
(Div32 <t> n (Const32 [c])) && isPowerOfTwo(c) ->
(Rsh32x64
- (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <types.UInt64> [31])) (Const64 <types.UInt64> [32-log2(c)])))
- (Const64 <types.UInt64> [log2(c)]))
+ (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [32-log2(c)])))
+ (Const64 <typ.UInt64> [log2(c)]))
(Div64 <t> n (Const64 [c])) && isPowerOfTwo(c) ->
(Rsh64x64
- (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <types.UInt64> [63])) (Const64 <types.UInt64> [64-log2(c)])))
- (Const64 <types.UInt64> [log2(c)]))
+ (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [64-log2(c)])))
+ (Const64 <typ.UInt64> [log2(c)]))
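The bias term is what turns a flooring arithmetic shift into Go's truncating division. An exhaustive 8-bit check with c = 4, so log2(c) = 2 and the bias n>>7>>(8-2) is c-1 = 3 exactly when n is negative:

    package main

    import "fmt"

    func main() {
        for i := -128; i < 128; i++ {
            n := int8(i)
            bias := int8(uint8(n>>7) >> (8 - 2)) // 3 for negative n, else 0
            if (n+bias)>>2 != n/4 {
                fmt.Println("mismatch at", n)
                return
            }
        }
        fmt.Println("ok")
    }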
// Signed divide, not a power of 2. Strength reduce to a multiply.
(Div8 <t> x (Const8 [c])) && smagicOK(8,c) ->
(Sub8 <t>
(Rsh32x64 <t>
- (Mul32 <types.UInt32>
- (Const32 <types.UInt32> [int64(smagic(8,c).m)])
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(smagic(8,c).m)])
(SignExt8to32 x))
- (Const64 <types.UInt64> [8+smagic(8,c).s]))
+ (Const64 <typ.UInt64> [8+smagic(8,c).s]))
(Rsh32x64 <t>
(SignExt8to32 x)
- (Const64 <types.UInt64> [31])))
+ (Const64 <typ.UInt64> [31])))
(Div16 <t> x (Const16 [c])) && smagicOK(16,c) ->
(Sub16 <t>
(Rsh32x64 <t>
- (Mul32 <types.UInt32>
- (Const32 <types.UInt32> [int64(smagic(16,c).m)])
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int64(smagic(16,c).m)])
(SignExt16to32 x))
- (Const64 <types.UInt64> [16+smagic(16,c).s]))
+ (Const64 <typ.UInt64> [16+smagic(16,c).s]))
(Rsh32x64 <t>
(SignExt16to32 x)
- (Const64 <types.UInt64> [31])))
+ (Const64 <typ.UInt64> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 8 ->
(Sub32 <t>
(Rsh64x64 <t>
- (Mul64 <types.UInt64>
- (Const64 <types.UInt64> [int64(smagic(32,c).m)])
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(smagic(32,c).m)])
(SignExt32to64 x))
- (Const64 <types.UInt64> [32+smagic(32,c).s]))
+ (Const64 <typ.UInt64> [32+smagic(32,c).s]))
(Rsh64x64 <t>
(SignExt32to64 x)
- (Const64 <types.UInt64> [63])))
+ (Const64 <typ.UInt64> [63])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 == 0 ->
(Sub32 <t>
(Rsh32x64 <t>
(Hmul32 <t>
- (Const32 <types.UInt32> [int64(int32(smagic(32,c).m/2))])
+ (Const32 <typ.UInt32> [int64(int32(smagic(32,c).m/2))])
x)
- (Const64 <types.UInt64> [smagic(32,c).s-1]))
+ (Const64 <typ.UInt64> [smagic(32,c).s-1]))
(Rsh32x64 <t>
x
- (Const64 <types.UInt64> [31])))
+ (Const64 <typ.UInt64> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 != 0 ->
(Sub32 <t>
(Rsh32x64 <t>
(Add32 <t>
(Hmul32 <t>
- (Const32 <types.UInt32> [int64(int32(smagic(32,c).m))])
+ (Const32 <typ.UInt32> [int64(int32(smagic(32,c).m))])
x)
x)
- (Const64 <types.UInt64> [smagic(32,c).s]))
+ (Const64 <typ.UInt64> [smagic(32,c).s]))
(Rsh32x64 <t>
x
- (Const64 <types.UInt64> [31])))
+ (Const64 <typ.UInt64> [31])))
(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 == 0 ->
(Sub64 <t>
(Rsh64x64 <t>
(Hmul64 <t>
- (Const64 <types.UInt64> [int64(smagic(64,c).m/2)])
+ (Const64 <typ.UInt64> [int64(smagic(64,c).m/2)])
x)
- (Const64 <types.UInt64> [smagic(64,c).s-1]))
+ (Const64 <typ.UInt64> [smagic(64,c).s-1]))
(Rsh64x64 <t>
x
- (Const64 <types.UInt64> [63])))
+ (Const64 <typ.UInt64> [63])))
(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 != 0 ->
(Sub64 <t>
(Rsh64x64 <t>
(Add64 <t>
(Hmul64 <t>
- (Const64 <types.UInt64> [int64(smagic(64,c).m)])
+ (Const64 <typ.UInt64> [int64(smagic(64,c).m)])
x)
x)
- (Const64 <types.UInt64> [smagic(64,c).s]))
+ (Const64 <typ.UInt64> [smagic(64,c).s]))
(Rsh64x64 <t>
x
- (Const64 <types.UInt64> [63])))
+ (Const64 <typ.UInt64> [63])))
// Unsigned mod by power of 2 constant.
(Mod8u <t> n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (And8 n (Const8 <t> [(c&0xff)-1]))
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 2a07d5bdfa..166b99d4ab 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -157,9 +157,11 @@ func genRules(arch arch) {
fmt.Fprintln(w, "import \"math\"")
fmt.Fprintln(w, "import \"cmd/internal/obj\"")
fmt.Fprintln(w, "import \"cmd/internal/objabi\"")
+ fmt.Fprintln(w, "import \"cmd/compile/internal/types\"")
fmt.Fprintln(w, "var _ = math.MinInt8 // in case not otherwise used")
fmt.Fprintln(w, "var _ = obj.ANOP // in case not otherwise used")
fmt.Fprintln(w, "var _ = objabi.GOROOT // in case not otherwise used")
+ fmt.Fprintln(w, "var _ = types.TypeMem // in case not otherwise used")
fmt.Fprintln(w)
const chunkSize = 10
@@ -230,9 +232,9 @@ func genRules(arch arch) {
hasb := strings.Contains(body, "b.")
hasconfig := strings.Contains(body, "config.") || strings.Contains(body, "config)")
hasfe := strings.Contains(body, "fe.")
- hasts := strings.Contains(body, "types.")
+ hastyps := strings.Contains(body, "typ.")
fmt.Fprintf(w, "func rewriteValue%s_%s_%d(v *Value) bool {\n", arch.name, op, chunk)
- if hasb || hasconfig || hasfe {
+ if hasb || hasconfig || hasfe || hastyps {
fmt.Fprintln(w, "b := v.Block")
fmt.Fprintln(w, "_ = b")
}
@@ -244,9 +246,9 @@ func genRules(arch arch) {
fmt.Fprintln(w, "fe := b.Func.fe")
fmt.Fprintln(w, "_ = fe")
}
- if hasts {
- fmt.Fprintln(w, "types := &b.Func.Config.Types")
- fmt.Fprintln(w, "_ = types")
+ if hastyps {
+ fmt.Fprintln(w, "typ := &b.Func.Config.Types")
+ fmt.Fprintln(w, "_ = typ")
}
fmt.Fprint(w, body)
fmt.Fprintf(w, "}\n")
@@ -260,8 +262,8 @@ func genRules(arch arch) {
fmt.Fprintln(w, "_ = config")
fmt.Fprintln(w, "fe := b.Func.fe")
fmt.Fprintln(w, "_ = fe")
- fmt.Fprintln(w, "types := &config.Types")
- fmt.Fprintln(w, "_ = types")
+ fmt.Fprintln(w, "typ := &config.Types")
+ fmt.Fprintln(w, "_ = typ")
fmt.Fprintf(w, "switch b.Kind {\n")
ops = nil
for op := range blockrules {
@@ -731,13 +733,13 @@ func typeName(typ string) string {
if len(ts) != 2 {
panic("Tuple expect 2 arguments")
}
- return "MakeTuple(" + typeName(ts[0]) + ", " + typeName(ts[1]) + ")"
+ return "types.NewTuple(" + typeName(ts[0]) + ", " + typeName(ts[1]) + ")"
}
switch typ {
case "Flags", "Mem", "Void", "Int128":
- return "Type" + typ
+ return "types.Type" + typ
default:
- return "types." + typ
+ return "typ." + typ
}
}
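For reference, here is the new typeName mapping in one standalone sketch; the "(T1,T2)" tuple spelling and the prefix test are assumptions reconstructed from the hunk above:

package main

import (
	"fmt"
	"strings"
)

// typeName sketches rulegen's mapping after this change.
func typeName(typ string) string {
	if strings.HasPrefix(typ, "(") {
		ts := strings.Split(typ[1:len(typ)-1], ",")
		if len(ts) != 2 {
			panic("Tuple expects 2 arguments")
		}
		return "types.NewTuple(" + typeName(ts[0]) + ", " + typeName(ts[1]) + ")"
	}
	switch typ {
	case "Flags", "Mem", "Void", "Int128":
		return "types.Type" + typ // special SSA singletons, now in package types
	default:
		return "typ." + typ // handy types from b.Func.Config.Types
	}
}

func main() {
	fmt.Println(typeName("UInt32"))       // typ.UInt32
	fmt.Println(typeName("Flags"))        // types.TypeFlags
	fmt.Println(typeName("(UInt32,Mem)")) // types.NewTuple(typ.UInt32, types.TypeMem)
}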
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index f9eaedf092..41b48947aa 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -4,7 +4,10 @@
package ssa
-import "fmt"
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
// A place where an ssa variable can reside.
type Location interface {
@@ -26,9 +29,9 @@ func (r *Register) Name() string {
// A LocalSlot is a location in the stack frame.
// It is (possibly a subpiece of) a PPARAM, PPARAMOUT, or PAUTO ONAME node.
type LocalSlot struct {
- N GCNode // an ONAME *gc.Node representing a variable on the stack
- Type Type // type of slot
- Off int64 // offset of slot in N
+ N GCNode // an ONAME *gc.Node representing a variable on the stack
+ Type *types.Type // type of slot
+ Off int64 // offset of slot in N
}
func (s LocalSlot) Name() string {
diff --git a/src/cmd/compile/internal/ssa/loop_test.go b/src/cmd/compile/internal/ssa/loop_test.go
index f891703e2f..f8dcdb0132 100644
--- a/src/cmd/compile/internal/ssa/loop_test.go
+++ b/src/cmd/compile/internal/ssa/loop_test.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"testing"
)
@@ -47,27 +48,27 @@ func TestLoopConditionS390X(t *testing.T) {
c := testConfigS390X(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("SP", OpSP, TypeUInt64, 0, nil),
- Valu("ret", OpAddr, TypeInt64Ptr, 0, nil, "SP"),
- Valu("N", OpArg, TypeInt64, 0, c.Frontend().Auto(src.NoXPos, TypeInt64)),
- Valu("starti", OpConst64, TypeInt64, 0, nil),
- Valu("startsum", OpConst64, TypeInt64, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.UInt64, 0, nil),
+ Valu("ret", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "SP"),
+ Valu("N", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("starti", OpConst64, c.config.Types.Int64, 0, nil),
+ Valu("startsum", OpConst64, c.config.Types.Int64, 0, nil),
Goto("b1")),
Bloc("b1",
- Valu("phii", OpPhi, TypeInt64, 0, nil, "starti", "i"),
- Valu("phisum", OpPhi, TypeInt64, 0, nil, "startsum", "sum"),
- Valu("cmp1", OpLess64, TypeBool, 0, nil, "phii", "N"),
+ Valu("phii", OpPhi, c.config.Types.Int64, 0, nil, "starti", "i"),
+ Valu("phisum", OpPhi, c.config.Types.Int64, 0, nil, "startsum", "sum"),
+ Valu("cmp1", OpLess64, c.config.Types.Bool, 0, nil, "phii", "N"),
If("cmp1", "b2", "b3")),
Bloc("b2",
- Valu("c1", OpConst64, TypeInt64, 1, nil),
- Valu("i", OpAdd64, TypeInt64, 0, nil, "phii", "c1"),
- Valu("c3", OpConst64, TypeInt64, 3, nil),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "phisum", "c3"),
+ Valu("c1", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("i", OpAdd64, c.config.Types.Int64, 0, nil, "phii", "c1"),
+ Valu("c3", OpConst64, c.config.Types.Int64, 3, nil),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "phisum", "c3"),
Goto("b1")),
Bloc("b3",
- Valu("retdef", OpVarDef, TypeMem, 0, nil, "mem"),
- Valu("store", OpStore, TypeMem, 0, TypeInt64, "ret", "phisum", "retdef"),
+ Valu("retdef", OpVarDef, types.TypeMem, 0, nil, "mem"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ret", "phisum", "retdef"),
Exit("store")))
CheckFunc(fun.f)
Compile(fun.f)
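In source terms, the function this test assembles is roughly the following sketch: b1 carries the phis and the comparison, b2 the increments, b3 the store of the result.

package main

import "fmt"

// sumLoop mirrors the blocks above: phii/phisum are the loop-carried
// values, c1 and c3 the constants 1 and 3, and the final store writes
// phisum to the result slot.
func sumLoop(N int64) int64 {
	sum := int64(0)
	for i := int64(0); i < N; i++ {
		sum += 3
	}
	return sum
}

func main() {
	fmt.Println(sumLoop(10)) // 30
}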
diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go
index 863fc9ccb7..8ffca82a68 100644
--- a/src/cmd/compile/internal/ssa/loopreschedchecks.go
+++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go
@@ -4,7 +4,10 @@
package ssa
-import "fmt"
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
// an edgeMem records a backedge, together with the memory
// phi functions at the target of the backedge that must
@@ -84,7 +87,7 @@ func insertLoopReschedChecks(f *Func) {
// It's possible that there is no memory state (no global/pointer loads/stores or calls)
if lastMems[f.Entry.ID] == nil {
- lastMems[f.Entry.ID] = f.Entry.NewValue0(f.Entry.Pos, OpInitMem, TypeMem)
+ lastMems[f.Entry.ID] = f.Entry.NewValue0(f.Entry.Pos, OpInitMem, types.TypeMem)
}
memDefsAtBlockEnds := make([]*Value, f.NumBlocks()) // For each block, the mem def seen at its bottom. Could be from earlier block.
@@ -197,8 +200,8 @@ func insertLoopReschedChecks(f *Func) {
// if sp < g.limit { goto sched }
// goto header
- types := &f.Config.Types
- pt := types.Uintptr
+ cfgtypes := &f.Config.Types
+ pt := cfgtypes.Uintptr
g := test.NewValue1(bb.Pos, OpGetG, pt, mem0)
sp := test.NewValue0(bb.Pos, OpSP, pt)
cmpOp := OpLess64U
@@ -207,7 +210,7 @@ func insertLoopReschedChecks(f *Func) {
}
limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.Size(), g)
lim := test.NewValue2(bb.Pos, OpLoad, pt, limaddr, mem0)
- cmp := test.NewValue2(bb.Pos, cmpOp, types.Bool, sp, lim)
+ cmp := test.NewValue2(bb.Pos, cmpOp, cfgtypes.Bool, sp, lim)
test.SetControl(cmp)
// if true, goto sched
@@ -226,7 +229,7 @@ func insertLoopReschedChecks(f *Func) {
// mem1 := call resched (mem0)
// goto header
resched := f.fe.Syslook("goschedguarded")
- mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, TypeMem, resched, mem0)
+ mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, resched, mem0)
sched.AddEdgeTo(h)
headerMemPhi.AddArg(mem1)
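In ordinary Go terms, the check each backedge now carries looks roughly like the sketch below; the g field layout is an assumption mirroring the 2*pt.Size() offset computed above, and goschedguarded stands in for the runtime call.

package main

import "fmt"

// gLike mimics the shape the pass assumes: the stack limit is the
// third pointer-sized word of g (the OpOffPtr [2*pt.Size()] above).
type gLike struct {
	stackLo uintptr
	stackHi uintptr
	limit   uintptr
}

func goschedguarded() { fmt.Println("rescheduled") } // stub for the runtime call

// loopBackedge is what one iteration's backedge reduces to:
// if sp is below g's limit, call the scheduler, then continue to the header.
func loopBackedge(g *gLike, sp uintptr) {
	if sp < g.limit { // cmpOp: OpLess64U or OpLess32U, depending on pointer width
		goschedguarded() // OpStaticCall, threading types.TypeMem
	}
}

func main() {
	g := &gLike{limit: 0x1000}
	loopBackedge(g, 0x800)  // stack pointer below the limit: reschedule
	loopBackedge(g, 0x2000) // plenty of stack: fall through
}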
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
index 06edb033e3..1d9e5d1630 100644
--- a/src/cmd/compile/internal/ssa/nilcheck_test.go
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -1,6 +1,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"strconv"
"testing"
)
@@ -16,13 +17,14 @@ func BenchmarkNilCheckDeep10000(b *testing.B) { benchmarkNilCheckDeep(b, 10000)
// nil checks, none of which can be eliminated.
// Run with multiple depths to observe big-O behavior.
func benchmarkNilCheckDeep(b *testing.B, depth int) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+ c := testConfig(b)
+ ptrType := c.config.Types.BytePtr
var blocs []bloc
blocs = append(blocs,
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto(blockn(0)),
),
)
@@ -30,7 +32,7 @@ func benchmarkNilCheckDeep(b *testing.B, depth int) {
blocs = append(blocs,
Bloc(blockn(i),
Valu(ptrn(i), OpAddr, ptrType, 0, nil, "sb"),
- Valu(booln(i), OpIsNonNil, TypeBool, 0, nil, ptrn(i)),
+ Valu(booln(i), OpIsNonNil, c.config.Types.Bool, 0, nil, ptrn(i)),
If(booln(i), blockn(i+1), "exit"),
),
)
@@ -40,7 +42,6 @@ func benchmarkNilCheckDeep(b *testing.B, depth int) {
Bloc("exit", Exit("mem")),
)
- c := testConfig(b)
fun := c.Fun("entry", blocs...)
CheckFunc(fun.f)
@@ -63,19 +64,19 @@ func isNilCheck(b *Block) bool {
// TestNilcheckSimple verifies that a second repeated nilcheck is removed.
func TestNilcheckSimple(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
- Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool1", "secondCheck", "exit")),
Bloc("secondCheck",
- Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool2", "extra", "exit")),
Bloc("extra",
Goto("exit")),
@@ -100,21 +101,21 @@ func TestNilcheckSimple(t *testing.T) {
// TestNilcheckDomOrder ensures that the nil check elimination isn't dependent
// on the order of the dominees.
func TestNilcheckDomOrder(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
- Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool1", "secondCheck", "exit")),
Bloc("exit",
Exit("mem")),
Bloc("secondCheck",
- Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool2", "extra", "exit")),
Bloc("extra",
Goto("exit")))
@@ -136,16 +137,16 @@ func TestNilcheckDomOrder(t *testing.T) {
// TestNilcheckAddr verifies that nilchecks of OpAddr constructed values are removed.
func TestNilcheckAddr(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
- Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool1", "extra", "exit")),
Bloc("extra",
Goto("exit")),
@@ -169,17 +170,17 @@ func TestNilcheckAddr(t *testing.T) {
// TestNilcheckAddPtr verifies that nilchecks of OpAddPtr constructed values are removed.
func TestNilcheckAddPtr(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
- Valu("off", OpConst64, TypeInt64, 20, nil),
+ Valu("off", OpConst64, c.config.Types.Int64, 20, nil),
Valu("ptr1", OpAddPtr, ptrType, 0, nil, "sb", "off"),
- Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool1", "extra", "exit")),
Bloc("extra",
Goto("exit")),
@@ -204,15 +205,15 @@ func TestNilcheckAddPtr(t *testing.T) {
// TestNilcheckPhi tests that nil checks of phis, for which all values are known to be
// non-nil, are removed.
func TestNilcheckPhi(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
- Valu("sp", OpSP, TypeInvalid, 0, nil),
- Valu("baddr", OpAddr, TypeBool, 0, "b", "sp"),
- Valu("bool1", OpLoad, TypeBool, 0, nil, "baddr", "mem"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("sp", OpSP, types.TypeInvalid, 0, nil),
+ Valu("baddr", OpAddr, c.config.Types.Bool, 0, "b", "sp"),
+ Valu("bool1", OpLoad, c.config.Types.Bool, 0, nil, "baddr", "mem"),
If("bool1", "b1", "b2")),
Bloc("b1",
Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
@@ -223,7 +224,7 @@ func TestNilcheckPhi(t *testing.T) {
// both ptr1 and ptr2 are guaranteed non-nil here
Bloc("checkPtr",
Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr2"),
- Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "phi"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "phi"),
If("bool2", "extra", "exit")),
Bloc("extra",
Goto("exit")),
@@ -248,23 +249,23 @@ func TestNilcheckPhi(t *testing.T) {
// TestNilcheckKeepRemove verifies that duplicate checks of the same pointer
// are removed, but checks of different pointers are not.
func TestNilcheckKeepRemove(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
- Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool1", "differentCheck", "exit")),
Bloc("differentCheck",
Valu("ptr2", OpLoad, ptrType, 0, nil, "sb", "mem"),
- Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr2"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr2"),
If("bool2", "secondCheck", "exit")),
Bloc("secondCheck",
- Valu("bool3", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool3", "extra", "exit")),
Bloc("extra",
Goto("exit")),
@@ -296,22 +297,22 @@ func TestNilcheckKeepRemove(t *testing.T) {
// TestNilcheckInFalseBranch tests that nil checks in the false branch of a nilcheck
// block are *not* removed.
func TestNilcheckInFalseBranch(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
- Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool1", "extra", "secondCheck")),
Bloc("secondCheck",
- Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool2", "extra", "thirdCheck")),
Bloc("thirdCheck",
- Valu("bool3", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool3", "extra", "exit")),
Bloc("extra",
Goto("exit")),
@@ -347,20 +348,20 @@ func TestNilcheckInFalseBranch(t *testing.T) {
// TestNilcheckUser verifies that a user nil check that dominates a generated nil check
// will remove the generated nil check.
func TestNilcheckUser(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("nilptr", OpConstNil, ptrType, 0, nil),
- Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
If("bool1", "secondCheck", "exit")),
Bloc("secondCheck",
- Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool2", "extra", "exit")),
Bloc("extra",
Goto("exit")),
@@ -386,29 +387,29 @@ func TestNilcheckUser(t *testing.T) {
// TestNilcheckBug reproduces a bug in nilcheckelim found by compiling math/big
func TestNilcheckBug(t *testing.T) {
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto("checkPtr")),
Bloc("checkPtr",
Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
Valu("nilptr", OpConstNil, ptrType, 0, nil),
- Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
If("bool1", "secondCheck", "couldBeNil")),
Bloc("couldBeNil",
Goto("secondCheck")),
Bloc("secondCheck",
- Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
If("bool2", "extra", "exit")),
Bloc("extra",
// prevent fuse from eliminating this block
- Valu("store", OpStore, TypeMem, 0, ptrType, "ptr1", "nilptr", "mem"),
+ Valu("store", OpStore, types.TypeMem, 0, ptrType, "ptr1", "nilptr", "mem"),
Goto("exit")),
Bloc("exit",
- Valu("phi", OpPhi, TypeMem, 0, nil, "mem", "store"),
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "mem", "store"),
Exit("phi")))
CheckFunc(fun.f)
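In source terms, the pattern these tests exercise is a dominated repeat of the same nil check; a trivial example of the shape (the elimination happens on the SSA form, not on the source):

package main

import "fmt"

// use contains the source-level shape of TestNilcheckSimple: the inner
// check of p is dominated by the outer one, so its SSA nil check is
// provably true and nilcheckelim drops it.
func use(p *int) {
	if p != nil { // bool1
		if p != nil { // bool2: redundant, removed at the SSA level
			fmt.Println(*p)
		}
	}
}

func main() {
	x := 42
	use(&x)
	use(nil)
}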
diff --git a/src/cmd/compile/internal/ssa/passbm_test.go b/src/cmd/compile/internal/ssa/passbm_test.go
index c316e9b67d..5e0a7eb3bb 100644
--- a/src/cmd/compile/internal/ssa/passbm_test.go
+++ b/src/cmd/compile/internal/ssa/passbm_test.go
@@ -4,6 +4,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"fmt"
"testing"
)
@@ -60,32 +61,32 @@ func benchFnBlock(b *testing.B, fn passFunc, bg blockGen) {
func genFunction(size int) []bloc {
var blocs []bloc
- elemType := &TypeImpl{Size_: 8, Name: "testtype"}
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr", Elem_: elemType} // dummy for testing
+ elemType := types.Types[types.TINT64]
+ ptrType := elemType.PtrTo()
valn := func(s string, m, n int) string { return fmt.Sprintf("%s%d-%d", s, m, n) }
blocs = append(blocs,
Bloc("entry",
- Valu(valn("store", 0, 4), OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
+ Valu(valn("store", 0, 4), OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
Goto(blockn(1)),
),
)
for i := 1; i < size+1; i++ {
blocs = append(blocs, Bloc(blockn(i),
- Valu(valn("v", i, 0), OpConstBool, TypeBool, 1, nil),
+ Valu(valn("v", i, 0), OpConstBool, types.Types[types.TBOOL], 1, nil),
Valu(valn("addr", i, 1), OpAddr, ptrType, 0, nil, "sb"),
Valu(valn("addr", i, 2), OpAddr, ptrType, 0, nil, "sb"),
Valu(valn("addr", i, 3), OpAddr, ptrType, 0, nil, "sb"),
- Valu(valn("zero", i, 1), OpZero, TypeMem, 8, elemType, valn("addr", i, 3),
+ Valu(valn("zero", i, 1), OpZero, types.TypeMem, 8, elemType, valn("addr", i, 3),
valn("store", i-1, 4)),
- Valu(valn("store", i, 1), OpStore, TypeMem, 0, elemType, valn("addr", i, 1),
+ Valu(valn("store", i, 1), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1),
valn("v", i, 0), valn("zero", i, 1)),
- Valu(valn("store", i, 2), OpStore, TypeMem, 0, elemType, valn("addr", i, 2),
+ Valu(valn("store", i, 2), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 2),
valn("v", i, 0), valn("store", i, 1)),
- Valu(valn("store", i, 3), OpStore, TypeMem, 0, elemType, valn("addr", i, 1),
+ Valu(valn("store", i, 3), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1),
valn("v", i, 0), valn("store", i, 2)),
- Valu(valn("store", i, 4), OpStore, TypeMem, 0, elemType, valn("addr", i, 3),
+ Valu(valn("store", i, 4), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 3),
valn("v", i, 0), valn("store", i, 3)),
Goto(blockn(i+1))))
}
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 08b4d3aca1..e0c73f92d3 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -114,6 +114,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/objabi"
"cmd/internal/src"
"fmt"
@@ -698,12 +699,12 @@ func (s *regAllocState) setState(regs []endReg) {
}
// compatRegs returns the set of registers which can store a type t.
-func (s *regAllocState) compatRegs(t Type) regMask {
+func (s *regAllocState) compatRegs(t *types.Type) regMask {
var m regMask
if t.IsTuple() || t.IsFlags() {
return 0
}
- if t.IsFloat() || t == TypeInt128 {
+ if t.IsFloat() || t == types.TypeInt128 {
m = s.f.Config.fpRegMask
} else {
m = s.f.Config.gpRegMask
@@ -2078,7 +2079,7 @@ func (e *edgeState) erase(loc Location) {
}
// findRegFor finds a register we can use to make a temp copy of type typ.
-func (e *edgeState) findRegFor(typ Type) Location {
+func (e *edgeState) findRegFor(typ *types.Type) Location {
// Which registers are possibilities.
var m regMask
types := &e.s.f.Config.Types
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index 3cbd164544..1e4dea8b27 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"testing"
)
@@ -13,11 +14,11 @@ func TestLiveControlOps(t *testing.T) {
c := testConfig(t)
f := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("x", OpAMD64MOVLconst, TypeInt8, 1, nil),
- Valu("y", OpAMD64MOVLconst, TypeInt8, 2, nil),
- Valu("a", OpAMD64TESTB, TypeFlags, 0, nil, "x", "y"),
- Valu("b", OpAMD64TESTB, TypeFlags, 0, nil, "y", "x"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpAMD64MOVLconst, c.config.Types.Int8, 1, nil),
+ Valu("y", OpAMD64MOVLconst, c.config.Types.Int8, 2, nil),
+ Valu("a", OpAMD64TESTB, types.TypeFlags, 0, nil, "x", "y"),
+ Valu("b", OpAMD64TESTB, types.TypeFlags, 0, nil, "y", "x"),
Eq("a", "if", "exit"),
),
Bloc("if",
@@ -41,23 +42,23 @@ func TestSpillWithLoop(t *testing.T) {
c := testConfig(t)
f := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("ptr", OpArg, TypeInt64Ptr, 0, c.Frontend().Auto(src.NoXPos, TypeInt64)),
- Valu("cond", OpArg, TypeBool, 0, c.Frontend().Auto(src.NoXPos, TypeBool)),
- Valu("ld", OpAMD64MOVQload, TypeInt64, 0, nil, "ptr", "mem"), // this value needs a spill
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Bool)),
+ Valu("ld", OpAMD64MOVQload, c.config.Types.Int64, 0, nil, "ptr", "mem"), // this value needs a spill
Goto("loop"),
),
Bloc("loop",
- Valu("memphi", OpPhi, TypeMem, 0, nil, "mem", "call"),
- Valu("call", OpAMD64CALLstatic, TypeMem, 0, nil, "memphi"),
- Valu("test", OpAMD64CMPBconst, TypeFlags, 0, nil, "cond"),
+ Valu("memphi", OpPhi, types.TypeMem, 0, nil, "mem", "call"),
+ Valu("call", OpAMD64CALLstatic, types.TypeMem, 0, nil, "memphi"),
+ Valu("test", OpAMD64CMPBconst, types.TypeFlags, 0, nil, "cond"),
Eq("test", "next", "exit"),
),
Bloc("next",
Goto("loop"),
),
Bloc("exit",
- Valu("store", OpAMD64MOVQstore, TypeMem, 0, nil, "ptr", "ld", "call"),
+ Valu("store", OpAMD64MOVQstore, types.TypeMem, 0, nil, "ptr", "ld", "call"),
Exit("store"),
),
)
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 4f2f3c0b5b..f69ffc8c5c 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
"io"
@@ -84,39 +85,39 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) {
// Common functions called from rewriting rules
-func is64BitFloat(t Type) bool {
+func is64BitFloat(t *types.Type) bool {
return t.Size() == 8 && t.IsFloat()
}
-func is32BitFloat(t Type) bool {
+func is32BitFloat(t *types.Type) bool {
return t.Size() == 4 && t.IsFloat()
}
-func is64BitInt(t Type) bool {
+func is64BitInt(t *types.Type) bool {
return t.Size() == 8 && t.IsInteger()
}
-func is32BitInt(t Type) bool {
+func is32BitInt(t *types.Type) bool {
return t.Size() == 4 && t.IsInteger()
}
-func is16BitInt(t Type) bool {
+func is16BitInt(t *types.Type) bool {
return t.Size() == 2 && t.IsInteger()
}
-func is8BitInt(t Type) bool {
+func is8BitInt(t *types.Type) bool {
return t.Size() == 1 && t.IsInteger()
}
-func isPtr(t Type) bool {
+func isPtr(t *types.Type) bool {
return t.IsPtrShaped()
}
-func isSigned(t Type) bool {
+func isSigned(t *types.Type) bool {
return t.IsSigned()
}
-func typeSize(t Type) int64 {
+func typeSize(t *types.Type) int64 {
return t.Size()
}
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index d6509ae368..21fc3f5c05 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValue386(v *Value) bool {
switch v.Op {
@@ -1528,7 +1530,7 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(Op386InvertFlags)
- v0 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v0.AuxInt = int64(int8(c))
v0.AddArg(x)
v.AddArg(v0)
@@ -1713,7 +1715,7 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(Op386InvertFlags)
- v0 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v.AddArg(v0)
@@ -1914,7 +1916,7 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(Op386InvertFlags)
- v0 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v0.AuxInt = int64(int16(c))
v0.AddArg(x)
v.AddArg(v0)
@@ -5056,8 +5058,8 @@ func rewriteValue386_Op386MOVSDconst_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVSDconst [c])
// cond: config.ctxt.Flag_shared
// result: (MOVSDconst2 (MOVSDconst1 [c]))
@@ -5067,7 +5069,7 @@ func rewriteValue386_Op386MOVSDconst_0(v *Value) bool {
break
}
v.reset(Op386MOVSDconst2)
- v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, typ.UInt32)
v0.AuxInt = c
v.AddArg(v0)
return true
@@ -5545,8 +5547,8 @@ func rewriteValue386_Op386MOVSSconst_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVSSconst [c])
// cond: config.ctxt.Flag_shared
// result: (MOVSSconst2 (MOVSSconst1 [c]))
@@ -5556,7 +5558,7 @@ func rewriteValue386_Op386MOVSSconst_0(v *Value) bool {
break
}
v.reset(Op386MOVSSconst2)
- v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, typ.UInt32)
v0.AuxInt = c
v.AddArg(v0)
return true
@@ -8372,8 +8374,8 @@ func rewriteValue386_Op386NOTL_0(v *Value) bool {
func rewriteValue386_Op386ORL_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL x (MOVLconst [c]))
// cond:
// result: (ORLconst [c] x)
@@ -8614,7 +8616,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -8628,8 +8630,8 @@ func rewriteValue386_Op386ORL_0(v *Value) bool {
func rewriteValue386_Op386ORL_10(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
@@ -8667,7 +8669,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -8738,7 +8740,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -8809,7 +8811,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -8880,7 +8882,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -8951,7 +8953,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -14101,8 +14103,8 @@ func rewriteValue386_OpDiv64F_0(v *Value) bool {
func rewriteValue386_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
@@ -14110,10 +14112,10 @@ func rewriteValue386_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -14122,8 +14124,8 @@ func rewriteValue386_OpDiv8_0(v *Value) bool {
func rewriteValue386_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
@@ -14131,10 +14133,10 @@ func rewriteValue386_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386DIVWU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -14150,7 +14152,7 @@ func rewriteValue386_OpEq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14167,7 +14169,7 @@ func rewriteValue386_OpEq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14184,7 +14186,7 @@ func rewriteValue386_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETEQF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14201,7 +14203,7 @@ func rewriteValue386_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETEQF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14218,7 +14220,7 @@ func rewriteValue386_OpEq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14235,7 +14237,7 @@ func rewriteValue386_OpEqB_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14252,7 +14254,7 @@ func rewriteValue386_OpEqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14269,7 +14271,7 @@ func rewriteValue386_OpGeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14286,7 +14288,7 @@ func rewriteValue386_OpGeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14303,7 +14305,7 @@ func rewriteValue386_OpGeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14320,7 +14322,7 @@ func rewriteValue386_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14337,7 +14339,7 @@ func rewriteValue386_OpGeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14354,7 +14356,7 @@ func rewriteValue386_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14371,7 +14373,7 @@ func rewriteValue386_OpGeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14388,7 +14390,7 @@ func rewriteValue386_OpGeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14425,7 +14427,7 @@ func rewriteValue386_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETG)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14442,7 +14444,7 @@ func rewriteValue386_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETA)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14459,7 +14461,7 @@ func rewriteValue386_OpGreater32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETG)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14476,7 +14478,7 @@ func rewriteValue386_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14493,7 +14495,7 @@ func rewriteValue386_OpGreater32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETA)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14510,7 +14512,7 @@ func rewriteValue386_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14527,7 +14529,7 @@ func rewriteValue386_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETG)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14544,7 +14546,7 @@ func rewriteValue386_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETA)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14602,7 +14604,7 @@ func rewriteValue386_OpIsInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(Op386SETB)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -14618,7 +14620,7 @@ func rewriteValue386_OpIsNonNil_0(v *Value) bool {
for {
p := v.Args[0]
v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Pos, Op386TESTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags)
v0.AddArg(p)
v0.AddArg(p)
v.AddArg(v0)
@@ -14635,7 +14637,7 @@ func rewriteValue386_OpIsSliceInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -14652,7 +14654,7 @@ func rewriteValue386_OpLeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14669,7 +14671,7 @@ func rewriteValue386_OpLeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14686,7 +14688,7 @@ func rewriteValue386_OpLeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14703,7 +14705,7 @@ func rewriteValue386_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -14720,7 +14722,7 @@ func rewriteValue386_OpLeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14737,7 +14739,7 @@ func rewriteValue386_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -14754,7 +14756,7 @@ func rewriteValue386_OpLeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14771,7 +14773,7 @@ func rewriteValue386_OpLeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14788,7 +14790,7 @@ func rewriteValue386_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETL)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14805,7 +14807,7 @@ func rewriteValue386_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETB)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14822,7 +14824,7 @@ func rewriteValue386_OpLess32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETL)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14839,7 +14841,7 @@ func rewriteValue386_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -14856,7 +14858,7 @@ func rewriteValue386_OpLess32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETB)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14873,7 +14875,7 @@ func rewriteValue386_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -14890,7 +14892,7 @@ func rewriteValue386_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETL)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14907,7 +14909,7 @@ func rewriteValue386_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETB)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -15008,7 +15010,7 @@ func rewriteValue386_OpLsh16x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15032,7 +15034,7 @@ func rewriteValue386_OpLsh16x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15093,7 +15095,7 @@ func rewriteValue386_OpLsh16x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15117,7 +15119,7 @@ func rewriteValue386_OpLsh32x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15141,7 +15143,7 @@ func rewriteValue386_OpLsh32x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15202,7 +15204,7 @@ func rewriteValue386_OpLsh32x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15226,7 +15228,7 @@ func rewriteValue386_OpLsh8x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15250,7 +15252,7 @@ func rewriteValue386_OpLsh8x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15311,7 +15313,7 @@ func rewriteValue386_OpLsh8x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -15374,8 +15376,8 @@ func rewriteValue386_OpMod32u_0(v *Value) bool {
func rewriteValue386_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (MODW (SignExt8to16 x) (SignExt8to16 y))
@@ -15383,10 +15385,10 @@ func rewriteValue386_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386MODW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -15395,8 +15397,8 @@ func rewriteValue386_OpMod8_0(v *Value) bool {
func rewriteValue386_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
@@ -15404,10 +15406,10 @@ func rewriteValue386_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386MODWU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -15416,8 +15418,8 @@ func rewriteValue386_OpMod8u_0(v *Value) bool {
func rewriteValue386_OpMove_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -15443,7 +15445,7 @@ func rewriteValue386_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(Op386MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVBload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -15462,7 +15464,7 @@ func rewriteValue386_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(Op386MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -15481,7 +15483,7 @@ func rewriteValue386_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(Op386MOVLstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -15501,14 +15503,14 @@ func rewriteValue386_OpMove_0(v *Value) bool {
v.reset(Op386MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVBload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, Op386MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, Op386MOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -15529,14 +15531,14 @@ func rewriteValue386_OpMove_0(v *Value) bool {
v.reset(Op386MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVBload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -15557,14 +15559,14 @@ func rewriteValue386_OpMove_0(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -15585,14 +15587,14 @@ func rewriteValue386_OpMove_0(v *Value) bool {
v.reset(Op386MOVLstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -15613,14 +15615,14 @@ func rewriteValue386_OpMove_0(v *Value) bool {
v.reset(Op386MOVLstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -15649,9 +15651,9 @@ func rewriteValue386_OpMove_0(v *Value) bool {
v1.AuxInt = s % 4
v1.AddArg(src)
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
v2.AddArg(dst)
- v3 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
+ v3 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v3.AddArg(src)
v3.AddArg(mem)
v2.AddArg(v3)
@@ -15666,8 +15668,8 @@ func rewriteValue386_OpMove_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [s] dst src mem)
// cond: s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice
// result: (DUFFCOPY [10*(128-s/4)] dst src mem)
@@ -15700,7 +15702,7 @@ func rewriteValue386_OpMove_10(v *Value) bool {
v.reset(Op386REPMOVSL)
v.AddArg(dst)
v.AddArg(src)
- v0 := b.NewValue0(v.Pos, Op386MOVLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
v0.AuxInt = s / 4
v.AddArg(v0)
v.AddArg(mem)
@@ -15813,11 +15815,11 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neg32F x)
// cond: !config.use387
- // result: (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
if !(!config.use387) {
@@ -15825,7 +15827,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool {
}
v.reset(Op386PXOR)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, Op386MOVSSconst, types.Float32)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32)
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
@@ -15849,11 +15851,11 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neg64F x)
// cond: !config.use387
- // result: (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
if !(!config.use387) {
@@ -15861,7 +15863,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool {
}
v.reset(Op386PXOR)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, Op386MOVSDconst, types.Float64)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64)
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
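The Neg32F/Neg64F lowerings above negate a float by XORing its sign bit: f2i(math.Copysign(0, -1)) is the constant whose only set bit is the sign. A standalone check of the trick:

package main

import (
	"fmt"
	"math"
)

func main() {
	// The bit pattern of -0.0 has only the sign bit set
	// (0x8000000000000000); XORing it flips a float's sign,
	// which is all the PXOR does here.
	signBit := math.Float64bits(math.Copysign(0, -1))
	for _, x := range []float64{1.5, -2.25, 0, math.Inf(1)} {
		neg := math.Float64frombits(math.Float64bits(x) ^ signBit)
		fmt.Println(x, "->", neg)
	}
}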
@@ -15901,7 +15903,7 @@ func rewriteValue386_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Pos, Op386CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -15918,7 +15920,7 @@ func rewriteValue386_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -15935,7 +15937,7 @@ func rewriteValue386_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETNEF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -15952,7 +15954,7 @@ func rewriteValue386_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETNEF)
- v0 := b.NewValue0(v.Pos, Op386UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -15969,7 +15971,7 @@ func rewriteValue386_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -15986,7 +15988,7 @@ func rewriteValue386_OpNeqB_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Pos, Op386CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -16003,7 +16005,7 @@ func rewriteValue386_OpNeqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Pos, Op386CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -16140,7 +16142,7 @@ func rewriteValue386_OpRsh16Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
@@ -16164,7 +16166,7 @@ func rewriteValue386_OpRsh16Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
@@ -16225,7 +16227,7 @@ func rewriteValue386_OpRsh16Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
@@ -16250,7 +16252,7 @@ func rewriteValue386_OpRsh16x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
@@ -16277,7 +16279,7 @@ func rewriteValue386_OpRsh16x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
@@ -16343,7 +16345,7 @@ func rewriteValue386_OpRsh16x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
@@ -16369,7 +16371,7 @@ func rewriteValue386_OpRsh32Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -16393,7 +16395,7 @@ func rewriteValue386_OpRsh32Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -16454,7 +16456,7 @@ func rewriteValue386_OpRsh32Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -16479,7 +16481,7 @@ func rewriteValue386_OpRsh32x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
@@ -16506,7 +16508,7 @@ func rewriteValue386_OpRsh32x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
@@ -16572,7 +16574,7 @@ func rewriteValue386_OpRsh32x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
@@ -16598,7 +16600,7 @@ func rewriteValue386_OpRsh8Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
@@ -16622,7 +16624,7 @@ func rewriteValue386_OpRsh8Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
@@ -16683,7 +16685,7 @@ func rewriteValue386_OpRsh8Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
@@ -16708,7 +16710,7 @@ func rewriteValue386_OpRsh8x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPWconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
@@ -16735,7 +16737,7 @@ func rewriteValue386_OpRsh8x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
@@ -16801,7 +16803,7 @@ func rewriteValue386_OpRsh8x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, Op386CMPBconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
@@ -16901,14 +16903,14 @@ func rewriteValue386_OpStaticCall_0(v *Value) bool {
}
func rewriteValue386_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(Op386MOVSDstore)
@@ -16918,14 +16920,14 @@ func rewriteValue386_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (MOVSSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(Op386MOVSSstore)
@@ -16935,14 +16937,14 @@ func rewriteValue386_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4
+ // cond: t.(*types.Type).Size() == 4
// result: (MOVLstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4) {
+ if !(t.(*types.Type).Size() == 4) {
break
}
v.reset(Op386MOVLstore)
@@ -16952,14 +16954,14 @@ func rewriteValue386_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(Op386MOVWstore)
@@ -16969,14 +16971,14 @@ func rewriteValue386_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(Op386MOVBstore)
@@ -17171,8 +17173,8 @@ func rewriteValue386_OpXor8_0(v *Value) bool {
func rewriteValue386_OpZero_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [0] _ mem)
// cond:
// result: mem
@@ -17243,7 +17245,7 @@ func rewriteValue386_OpZero_0(v *Value) bool {
v.reset(Op386MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 2)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -17262,7 +17264,7 @@ func rewriteValue386_OpZero_0(v *Value) bool {
v.reset(Op386MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -17281,7 +17283,7 @@ func rewriteValue386_OpZero_0(v *Value) bool {
v.reset(Op386MOVWstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -17300,7 +17302,7 @@ func rewriteValue386_OpZero_0(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 3)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -17319,11 +17321,11 @@ func rewriteValue386_OpZero_0(v *Value) bool {
}
v.reset(OpZero)
v.AuxInt = s - s%4
- v0 := b.NewValue0(v.Pos, Op386ADDLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32)
v0.AuxInt = s % 4
v0.AddArg(destptr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(destptr)
v1.AddArg(mem)
@@ -17342,7 +17344,7 @@ func rewriteValue386_OpZero_0(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -17356,8 +17358,8 @@ func rewriteValue386_OpZero_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [12] destptr mem)
// cond:
// result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)))
@@ -17370,10 +17372,10 @@ func rewriteValue386_OpZero_10(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 8)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = makeValAndOff(0, 4)
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(destptr)
v1.AddArg(mem)
@@ -17393,13 +17395,13 @@ func rewriteValue386_OpZero_10(v *Value) bool {
v.reset(Op386MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 12)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v0.AuxInt = makeValAndOff(0, 8)
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v1.AuxInt = makeValAndOff(0, 4)
v1.AddArg(destptr)
- v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
v2.AuxInt = 0
v2.AddArg(destptr)
v2.AddArg(mem)
@@ -17421,7 +17423,7 @@ func rewriteValue386_OpZero_10(v *Value) bool {
v.reset(Op386DUFFZERO)
v.AuxInt = 1 * (128 - s/4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -17439,10 +17441,10 @@ func rewriteValue386_OpZero_10(v *Value) bool {
}
v.reset(Op386REPSTOSL)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
v0.AuxInt = s / 4
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, Op386MOVLconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)
@@ -17495,7 +17497,7 @@ func rewriteValue386_OpZeromask_0(v *Value) bool {
v.reset(Op386XORLconst)
v.AuxInt = -1
v0 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
- v1 := b.NewValue0(v.Pos, Op386CMPLconst, TypeFlags)
+ v1 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
v1.AuxInt = 1
v1.AddArg(x)
v0.AddArg(v1)
@@ -17508,8 +17510,8 @@ func rewriteBlock386(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case Block386EQ:
// match: (EQ (InvertFlags cmp) yes no)
@@ -17933,7 +17935,7 @@ func rewriteBlock386(b *Block) bool {
_ = v
cond := b.Control
b.Kind = Block386NE
- v0 := b.NewValue0(v.Pos, Op386TESTB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, Op386TESTB, types.TypeFlags)
v0.AddArg(cond)
v0.AddArg(cond)
b.SetControl(v0)
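Every hunk above and below applies the same two mechanical substitutions: the rule-local variable types := &b.Func.Config.Types, which shadowed the package name, is renamed to typ, and the bare flag/memory singletons (TypeFlags, TypeMem) move behind the newly imported package as types.TypeFlags and types.TypeMem. The following is a minimal hand-written sketch of the post-rewrite shape, assuming it sits inside package ssa after this CL; rewriteExample is a hypothetical rule body for illustration only, not a function in the diff:

package ssa

import "cmd/compile/internal/types"

var _ = types.TypeMem // in case not otherwise used

// rewriteExample is a hypothetical rule body illustrating the renamed
// conventions used throughout the generated files; it is not part of this CL.
func rewriteExample(v *Value) bool {
	b := v.Block
	// The per-config list of handy types is now bound to typ,
	// leaving the name types free for the imported package.
	typ := &b.Func.Config.Types
	// The special flags type is now a singleton in package types.
	v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
	v0.AddArg(v.Args[0])
	// Ordinary Go types still come from the config's type list.
	v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
	v1.AuxInt = 0
	v.AddArg(v1)
	return false
}

The generated files additionally keep the defensive _ = typ and var _ = types.TypeMem blank assignments, visible in the hunks, so that functions and files that happen not to use them still compile.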
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 1937af2b8f..ff24103eec 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValueAMD64(v *Value) bool {
switch v.Op {
@@ -2446,7 +2448,7 @@ func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v0.AuxInt = int64(int8(c))
v0.AddArg(x)
v.AddArg(v0)
@@ -2631,7 +2633,7 @@ func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v.AddArg(v0)
@@ -2838,7 +2840,7 @@ func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
break
}
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v.AddArg(v0)
@@ -3159,7 +3161,7 @@ func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v0.AuxInt = int64(int16(c))
v0.AddArg(x)
v.AddArg(v0)
@@ -7151,8 +7153,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
@@ -7299,7 +7301,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
v.AddArg(p)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v0)
v.AddArg(mem)
@@ -7357,8 +7359,8 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
// cond:
// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
@@ -7458,7 +7460,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
v.Aux = s
v.AddArg(p)
v.AddArg(i)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v0)
v.AddArg(mem)
@@ -7469,8 +7471,8 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
// cond:
// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
@@ -7549,7 +7551,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
v0.AuxInt = 2
v0.AddArg(i)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v1)
v.AddArg(mem)
@@ -15073,8 +15075,8 @@ func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
@@ -15693,7 +15695,7 @@ func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -15707,8 +15709,8 @@ func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
@@ -15746,7 +15748,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -15792,7 +15794,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -15838,7 +15840,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -15897,7 +15899,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -15957,7 +15959,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -16017,7 +16019,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -16077,7 +16079,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -16245,8 +16247,8 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
@@ -16543,7 +16545,7 @@ func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -16594,7 +16596,7 @@ func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -16645,7 +16647,7 @@ func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -16696,7 +16698,7 @@ func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -16747,7 +16749,7 @@ func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -16762,8 +16764,8 @@ func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
@@ -16805,7 +16807,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -16856,7 +16858,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -16907,7 +16909,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -16971,7 +16973,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17036,7 +17038,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17101,7 +17103,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17166,7 +17168,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17231,7 +17233,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17296,7 +17298,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17361,7 +17363,7 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17377,8 +17379,8 @@ func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
@@ -17433,7 +17435,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17498,7 +17500,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17563,7 +17565,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17628,7 +17630,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17693,7 +17695,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17758,7 +17760,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17823,7 +17825,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17888,7 +17890,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -17953,7 +17955,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -18005,7 +18007,7 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18018,8 +18020,8 @@ func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
@@ -18061,7 +18063,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18123,7 +18125,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18185,7 +18187,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18195,7 +18197,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -18243,9 +18245,9 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -18258,7 +18260,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -18306,9 +18308,9 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -18321,7 +18323,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
}
// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -18369,9 +18371,9 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -18384,7 +18386,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
}
// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -18432,9 +18434,9 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -18490,7 +18492,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18544,7 +18546,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18598,7 +18600,7 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18612,8 +18614,8 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
@@ -18659,7 +18661,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18713,7 +18715,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18767,7 +18769,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18821,7 +18823,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18875,7 +18877,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -18942,7 +18944,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -19009,7 +19011,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -19076,7 +19078,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -19143,7 +19145,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -19210,7 +19212,7 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -19224,8 +19226,8 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
@@ -19284,7 +19286,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -19351,7 +19353,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -19418,7 +19420,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -19429,7 +19431,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -19481,9 +19483,9 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -19497,7 +19499,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -19549,9 +19551,9 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -19565,7 +19567,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -19617,9 +19619,9 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -19633,7 +19635,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -19685,9 +19687,9 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -19701,7 +19703,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -19753,9 +19755,9 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -19769,7 +19771,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -19821,9 +19823,9 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -19837,7 +19839,7 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
}
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -19889,9 +19891,9 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -19908,11 +19910,11 @@ func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLLconst {
@@ -19964,9 +19966,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -19980,7 +19982,7 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
}
// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -20032,9 +20034,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -20048,7 +20050,7 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
}
// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -20100,9 +20102,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -20116,7 +20118,7 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
}
// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -20168,9 +20170,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -20184,7 +20186,7 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
}
// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -20236,9 +20238,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -20252,7 +20254,7 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
}
// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -20304,9 +20306,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -20320,7 +20322,7 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
}
// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -20372,9 +20374,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -20388,7 +20390,7 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
}
// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -20440,9 +20442,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -20456,7 +20458,7 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
}
// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORL {
@@ -20508,9 +20510,9 @@ func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -21755,8 +21757,8 @@ func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
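// typ exposes the pre-built *types.Type singletons (typ.UInt16 and
// friends) from the function's Config; the rules below attach these
// types to each value they construct.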
// match: (ORQ x x)
// cond:
// result: x
@@ -21807,7 +21809,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
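// mergePoint selects a block in which both partial loads are available
// (the cond above already checked that it is non-nil); the merged wide
// load is built there, and v is reset to a copy of it, which is what the
// "@mergePoint(b,x0,x1)" prefix in the result comments denotes.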
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -21853,7 +21855,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -21899,7 +21901,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -21945,7 +21947,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -21991,7 +21993,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -22037,7 +22039,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -22096,7 +22098,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22156,7 +22158,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22216,7 +22218,7 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22231,8 +22233,8 @@ func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
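// clobber resets a matched value to OpInvalid and always returns true, so
// it can be chained into the boolean cond while ensuring the superseded
// narrow loads and shifts are dead once the rule fires.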
@@ -22283,7 +22285,7 @@ func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22343,7 +22345,7 @@ func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22403,7 +22405,7 @@ func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22463,7 +22465,7 @@ func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22523,7 +22525,7 @@ func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22793,8 +22795,8 @@ func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
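// The loadidx1 forms are indexed loads addressing p + 1*idx; because the
// matcher treats p and idx as distinct arguments, each combine below is
// repeated for every p/idx argument order that can occur.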
@@ -22989,7 +22991,7 @@ func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23040,7 +23042,7 @@ func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23091,7 +23093,7 @@ func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23142,7 +23144,7 @@ func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23193,7 +23195,7 @@ func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23244,7 +23246,7 @@ func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23295,7 +23297,7 @@ func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23310,8 +23312,8 @@ func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
@@ -23353,7 +23355,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23404,7 +23406,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23455,7 +23457,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23506,7 +23508,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23557,7 +23559,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23608,7 +23610,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23659,7 +23661,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23710,7 +23712,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23761,7 +23763,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23825,7 +23827,7 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -23841,8 +23843,8 @@ func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
@@ -23897,7 +23899,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -23962,7 +23964,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24027,7 +24029,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24092,7 +24094,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24157,7 +24159,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24222,7 +24224,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24287,7 +24289,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24352,7 +24354,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24417,7 +24419,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24482,7 +24484,7 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24498,8 +24500,8 @@ func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
@@ -24554,7 +24556,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24619,7 +24621,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24684,7 +24686,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24749,7 +24751,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24814,7 +24816,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24879,7 +24881,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -24944,7 +24946,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25009,7 +25011,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25074,7 +25076,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25139,7 +25141,7 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25155,8 +25157,8 @@ func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
@@ -25211,7 +25213,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25276,7 +25278,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25341,7 +25343,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25406,7 +25408,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25471,7 +25473,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25536,7 +25538,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25601,7 +25603,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25666,7 +25668,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25731,7 +25733,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25796,7 +25798,7 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25812,8 +25814,8 @@ func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
@@ -25868,7 +25870,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -25920,7 +25922,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -25969,7 +25971,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -26031,7 +26033,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
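// BSWAPL reverses all four bytes of the single merged 32-bit load in one
// instruction, standing in for the byte-reversed partial loads this rule
// matched.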
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -26093,7 +26095,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -26149,7 +26151,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -26205,7 +26207,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -26215,7 +26217,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -26263,9 +26265,9 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26278,7 +26280,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -26326,9 +26328,9 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26341,7 +26343,7 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -26389,9 +26391,9 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26407,11 +26409,11 @@ func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -26459,9 +26461,9 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26474,7 +26476,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -26536,8 +26538,8 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26550,7 +26552,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -26612,8 +26614,8 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26626,7 +26628,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -26688,8 +26690,8 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26702,7 +26704,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
}
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLload [i0] {s} p mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -26764,8 +26766,8 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26821,7 +26823,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -26875,7 +26877,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -26929,7 +26931,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -26983,7 +26985,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27037,7 +27039,7 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27051,8 +27053,8 @@ func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
@@ -27098,7 +27100,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27152,7 +27154,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27206,7 +27208,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27273,7 +27275,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27340,7 +27342,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27407,7 +27409,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27474,7 +27476,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27541,7 +27543,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27608,7 +27610,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27675,7 +27677,7 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27689,8 +27691,8 @@ func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
@@ -27749,7 +27751,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27810,7 +27812,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
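// The 64-bit analogue: one MOVQ indexed load followed by BSWAPQ replaces
// the byte-reversed narrower loads this rule matched.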
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27871,7 +27873,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27932,7 +27934,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -27993,7 +27995,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -28054,7 +28056,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -28115,7 +28117,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -28176,7 +28178,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -28237,7 +28239,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -28248,7 +28250,7 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -28300,9 +28302,9 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28319,11 +28321,11 @@ func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -28375,9 +28377,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28391,7 +28393,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -28443,9 +28445,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28459,7 +28461,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -28511,9 +28513,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28527,7 +28529,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -28579,9 +28581,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28595,7 +28597,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -28647,9 +28649,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28663,7 +28665,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -28715,9 +28717,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28731,7 +28733,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -28783,9 +28785,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28799,7 +28801,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -28851,9 +28853,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28867,7 +28869,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -28919,9 +28921,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -28935,7 +28937,7 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
}
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -28987,9 +28989,9 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29006,11 +29008,11 @@ func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
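
Every rewrite function's preamble changes the same way: the local that caches the handy type list is renamed from types to typ, freeing the identifier types to refer to the cmd/compile/internal/types package. A minimal sketch of the new preamble, with b and v the *Block and *Value already in scope:

	typ := &b.Func.Config.Types // cached singletons for common types: UInt16, UInt32, Int64, BytePtr, ...
	_ = typ                     // some generated functions never read it; the blank use keeps them compiling
	v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) // fixed-width intermediates come from the cache
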
@@ -29062,9 +29064,9 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29078,7 +29080,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -29130,9 +29132,9 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29146,7 +29148,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -29198,9 +29200,9 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29214,7 +29216,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -29266,9 +29268,9 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29282,7 +29284,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <types.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -29334,9 +29336,9 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
v2.AuxInt = 8
- v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29350,7 +29352,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -29416,8 +29418,8 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29431,7 +29433,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -29497,8 +29499,8 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29512,7 +29514,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -29578,8 +29580,8 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29593,7 +29595,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -29659,8 +29661,8 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29674,7 +29676,7 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -29740,8 +29742,8 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29758,11 +29760,11 @@ func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -29828,8 +29830,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29843,7 +29845,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -29909,8 +29911,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -29924,7 +29926,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
s0 := v.Args[0]
if s0.Op != OpAMD64SHLQconst {
@@ -29990,8 +29992,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30005,7 +30007,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -30071,8 +30073,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30086,7 +30088,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -30152,8 +30154,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30167,7 +30169,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -30233,8 +30235,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30248,7 +30250,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -30314,8 +30316,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30329,7 +30331,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -30395,8 +30397,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30410,7 +30412,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -30476,8 +30478,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30491,7 +30493,7 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
}
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -30557,8 +30559,8 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30575,11 +30577,11 @@ func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <types.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
for {
or := v.Args[0]
if or.Op != OpAMD64ORQ {
@@ -30645,8 +30647,8 @@ func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32457,7 +32459,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
break
}
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
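
The special SSA flags type moves too: values that produce condition flags (BTL, BTQ, CMPx, UCOMISx, TESTx) were built with the package-ssa singleton TypeFlags and now use types.TypeFlags from package types. A sketch of the bit-test lowering, with x and y bound by the SETEQ match as above:

	v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) // the result is CPU flags, not data
	v0.AddArg(x) // bit index
	v0.AddArg(y) // value whose bit is tested
	v.AddArg(v0) // v was reset to SETAE: a clear bit leaves the carry flag clear
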
@@ -32488,7 +32490,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
break
}
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -32519,7 +32521,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
break
}
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -32550,7 +32552,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
break
}
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -32570,7 +32572,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
break
}
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
v.AddArg(v0)
@@ -32590,7 +32592,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
break
}
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
v.AddArg(v0)
@@ -32614,7 +32616,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
break
}
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
v.AddArg(v0)
@@ -32638,7 +32640,7 @@ func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
break
}
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
v.AddArg(v0)
@@ -33056,7 +33058,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
break
}
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -33087,7 +33089,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
break
}
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -33118,7 +33120,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
break
}
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -33149,7 +33151,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
break
}
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -33169,7 +33171,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
break
}
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
v.AddArg(v0)
@@ -33189,7 +33191,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
break
}
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
v.AddArg(v0)
@@ -33213,7 +33215,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
break
}
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
v.AddArg(v0)
@@ -33237,7 +33239,7 @@ func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
break
}
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
v.AddArg(v0)
@@ -35896,8 +35898,8 @@ func rewriteValueAMD64_OpAndB_0(v *Value) bool {
func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (AddTupleFirst32 (XADDLlock val ptr mem) val)
@@ -35906,7 +35908,7 @@ func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
val := v.Args[1]
mem := v.Args[2]
v.reset(OpAMD64AddTupleFirst32)
- v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(types.UInt32, TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
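
Tuple-typed results switch from the package-ssa constructor MakeTuple to types.NewTuple, pairing a data type from the typ cache with the types.TypeMem singleton. A sketch of the atomic-add lowering, whose locked XADDL yields both the loaded value and the new memory state:

	t := types.NewTuple(typ.UInt32, types.TypeMem) // (value, memory) tuple type
	v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, t)
	v0.AddArg(val)
	v0.AddArg(ptr)
	v0.AddArg(mem)
	v.AddArg(v0) // AddTupleFirst32 then adds val back onto the first component
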
@@ -35918,8 +35920,8 @@ func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicAdd64 ptr val mem)
// cond:
// result: (AddTupleFirst64 (XADDQlock val ptr mem) val)
@@ -35928,7 +35930,7 @@ func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
val := v.Args[1]
mem := v.Args[2]
v.reset(OpAMD64AddTupleFirst64)
- v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(types.UInt64, TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
@@ -36095,17 +36097,17 @@ func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicStore32 ptr val mem)
// cond:
- // result: (Select1 (XCHGL <MakeTuple(types.UInt32,TypeMem)> val ptr mem))
+ // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(types.UInt32, TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
@@ -36116,17 +36118,17 @@ func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicStore64 ptr val mem)
// cond:
- // result: (Select1 (XCHGQ <MakeTuple(types.UInt64,TypeMem)> val ptr mem))
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(types.UInt64, TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
@@ -36139,11 +36141,11 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicStorePtrNoWB ptr val mem)
// cond: config.PtrSize == 8
- // result: (Select1 (XCHGQ <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -36152,7 +36154,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
break
}
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(types.BytePtr, TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
@@ -36161,7 +36163,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
}
// match: (AtomicStorePtrNoWB ptr val mem)
// cond: config.PtrSize == 4
- // result: (Select1 (XCHGL <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
+ // result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -36170,7 +36172,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
break
}
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(types.BytePtr, TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
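
The atomic stores follow the same tuple pattern: the store lowers to an exchange whose result is a (value, memory) pair, and Select1 keeps only the memory half. A sketch matching the config.PtrSize == 4 guard above:

	v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
	v0.AddArg(val)
	v0.AddArg(ptr)
	v0.AddArg(mem)
	v.AddArg(v0) // v was reset to OpSelect1: discard the exchanged value, keep the memory
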
@@ -36195,15 +36197,15 @@ func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitLen32 x)
// cond:
- // result: (BitLen64 (MOVLQZX <types.UInt64> x))
+ // result: (BitLen64 (MOVLQZX <typ.UInt64> x))
for {
x := v.Args[0]
v.reset(OpBitLen64)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -36212,11 +36214,11 @@ func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitLen64 <t> x)
// cond:
- // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))
+ // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
for {
t := v.Type
x := v.Args[0]
@@ -36224,15 +36226,15 @@ func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
v.AuxInt = 1
v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
v1 := b.NewValue0(v.Pos, OpSelect0, t)
- v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(types.UInt64, TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v2.AddArg(x)
v1.AddArg(v2)
v0.AddArg(v1)
v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
v3.AuxInt = -1
v0.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
- v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(types.UInt64, TypeFlags))
+ v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v5.AddArg(x)
v4.AddArg(v5)
v0.AddArg(v4)
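
BSRQ (and BSFQ in the Ctz rules below) now carries the tuple type types.NewTuple(typ.UInt64, types.TypeFlags): Select0 extracts the bit index and Select1 <types.TypeFlags> extracts the flags that CMOVQEQ consults to patch up the all-zeroes input. A sketch of the pair, with t the result type of the surrounding value:

	bsr := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
	bsr.AddArg(x)
	idx := b.NewValue0(v.Pos, OpSelect0, t) // index of the highest set bit
	idx.AddArg(bsr)
	flags := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) // ZF is set when x == 0
	flags.AddArg(bsr)
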
@@ -36471,17 +36473,17 @@ func rewriteValueAMD64_OpConvert_0(v *Value) bool {
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Ctz32 x)
// cond:
- // result: (Select0 (BSFQ (ORQ <types.UInt64> (MOVQconst [1<<32]) x)))
+ // result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
for {
x := v.Args[0]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
- v1 := b.NewValue0(v.Pos, OpAMD64ORQ, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v2.AuxInt = 1 << 32
v1.AddArg(v2)
v1.AddArg(x)
@@ -36493,25 +36495,25 @@ func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Ctz64 <t> x)
// cond:
- // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
+ // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
for {
t := v.Type
x := v.Args[0]
v.reset(OpAMD64CMOVQEQ)
v0 := b.NewValue0(v.Pos, OpSelect0, t)
- v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
v2.AuxInt = 64
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
- v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v4.AddArg(x)
v3.AddArg(v4)
v.AddArg(v3)
@@ -36646,8 +36648,8 @@ func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 x y)
// cond:
// result: (Select0 (DIVW x y))
@@ -36655,7 +36657,7 @@ func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36665,8 +36667,8 @@ func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u x y)
// cond:
// result: (Select0 (DIVWU x y))
@@ -36674,7 +36676,7 @@ func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36684,8 +36686,8 @@ func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32 x y)
// cond:
// result: (Select0 (DIVL x y))
@@ -36693,7 +36695,7 @@ func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(types.Int32, types.Int32))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36716,8 +36718,8 @@ func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32u x y)
// cond:
// result: (Select0 (DIVLU x y))
@@ -36725,7 +36727,7 @@ func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(types.UInt32, types.UInt32))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36735,8 +36737,8 @@ func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div64 x y)
// cond:
// result: (Select0 (DIVQ x y))
@@ -36744,7 +36746,7 @@ func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(types.Int64, types.Int64))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36767,8 +36769,8 @@ func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div64u x y)
// cond:
// result: (Select0 (DIVQU x y))
@@ -36776,7 +36778,7 @@ func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36786,8 +36788,8 @@ func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
@@ -36795,11 +36797,11 @@ func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
- v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -36809,8 +36811,8 @@ func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
@@ -36818,11 +36820,11 @@ func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
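
The divide lowerings use the same two-result shape: DIVW, DIVWU and friends are typed with types.NewTuple as a (quotient, remainder) pair of matching widths; these Div rules keep Select0, and the corresponding Mod rules elsewhere in this file keep Select1. A sketch of the selection step, assuming v0 is the DIVWU value built as above:

	q := b.NewValue0(v.Pos, OpSelect0, typ.UInt16) // quotient half of the (UInt16, UInt16) tuple
	q.AddArg(v0)                                   // a Mod8u rule would build OpSelect1 here instead
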
@@ -36839,7 +36841,7 @@ func rewriteValueAMD64_OpEq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36856,7 +36858,7 @@ func rewriteValueAMD64_OpEq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36873,7 +36875,7 @@ func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETEQF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36890,7 +36892,7 @@ func rewriteValueAMD64_OpEq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36907,7 +36909,7 @@ func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETEQF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36924,7 +36926,7 @@ func rewriteValueAMD64_OpEq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36941,7 +36943,7 @@ func rewriteValueAMD64_OpEqB_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36963,7 +36965,7 @@ func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
break
}
v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36979,7 +36981,7 @@ func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
break
}
v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -36997,7 +36999,7 @@ func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37014,7 +37016,7 @@ func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37031,7 +37033,7 @@ func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37048,7 +37050,7 @@ func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37065,7 +37067,7 @@ func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37082,7 +37084,7 @@ func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37099,7 +37101,7 @@ func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37116,7 +37118,7 @@ func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37133,7 +37135,7 @@ func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37150,7 +37152,7 @@ func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37187,7 +37189,7 @@ func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37204,7 +37206,7 @@ func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37221,7 +37223,7 @@ func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37238,7 +37240,7 @@ func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37255,7 +37257,7 @@ func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37272,7 +37274,7 @@ func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37289,7 +37291,7 @@ func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37306,7 +37308,7 @@ func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37323,7 +37325,7 @@ func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37340,7 +37342,7 @@ func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37436,7 +37438,7 @@ func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -37457,7 +37459,7 @@ func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
break
}
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
v0.AddArg(p)
v0.AddArg(p)
v.AddArg(v0)
@@ -37472,7 +37474,7 @@ func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
break
}
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64TESTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
v0.AddArg(p)
v0.AddArg(p)
v.AddArg(v0)
@@ -37490,7 +37492,7 @@ func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -37507,7 +37509,7 @@ func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37524,7 +37526,7 @@ func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37541,7 +37543,7 @@ func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37558,7 +37560,7 @@ func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -37575,7 +37577,7 @@ func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37592,7 +37594,7 @@ func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37609,7 +37611,7 @@ func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -37626,7 +37628,7 @@ func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37643,7 +37645,7 @@ func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37660,7 +37662,7 @@ func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37677,7 +37679,7 @@ func rewriteValueAMD64_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37694,7 +37696,7 @@ func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37711,7 +37713,7 @@ func rewriteValueAMD64_OpLess32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37728,7 +37730,7 @@ func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -37745,7 +37747,7 @@ func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37762,7 +37764,7 @@ func rewriteValueAMD64_OpLess64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37779,7 +37781,7 @@ func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -37796,7 +37798,7 @@ func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37813,7 +37815,7 @@ func rewriteValueAMD64_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37830,7 +37832,7 @@ func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -37950,7 +37952,7 @@ func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -37974,7 +37976,7 @@ func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -37998,7 +38000,7 @@ func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38022,7 +38024,7 @@ func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38046,7 +38048,7 @@ func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38070,7 +38072,7 @@ func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38094,7 +38096,7 @@ func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38118,7 +38120,7 @@ func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38142,7 +38144,7 @@ func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
@@ -38166,7 +38168,7 @@ func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
@@ -38190,7 +38192,7 @@ func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
@@ -38214,7 +38216,7 @@ func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
@@ -38238,7 +38240,7 @@ func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38262,7 +38264,7 @@ func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38286,7 +38288,7 @@ func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38310,7 +38312,7 @@ func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -38321,8 +38323,8 @@ func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16 x y)
// cond:
// result: (Select1 (DIVW x y))
@@ -38330,7 +38332,7 @@ func rewriteValueAMD64_OpMod16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
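The Mod16 hunk above shows the tuple change concretely: tuple construction moves from ssa's MakeTuple helper into package types as types.NewTuple, and the rule-local shorthand for the handy type list is renamed from types to typ so the identifier no longer shadows the imported package. A minimal sketch of the post-CL shape, assuming b and v are supplied by the enclosing generated rewrite function:

	typ := &b.Func.Config.Types               // handy types, now spelled typ
	t := types.NewTuple(typ.Int16, typ.Int16) // was: MakeTuple(types.Int16, types.Int16)
	v0 := b.NewValue0(v.Pos, OpAMD64DIVW, t)  // DIVW produces a (quo, rem) tuple
	_ = v0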
@@ -38340,8 +38342,8 @@ func rewriteValueAMD64_OpMod16_0(v *Value) bool {
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16u x y)
// cond:
// result: (Select1 (DIVWU x y))
@@ -38349,7 +38351,7 @@ func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -38359,8 +38361,8 @@ func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32 x y)
// cond:
// result: (Select1 (DIVL x y))
@@ -38368,7 +38370,7 @@ func rewriteValueAMD64_OpMod32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(types.Int32, types.Int32))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -38378,8 +38380,8 @@ func rewriteValueAMD64_OpMod32_0(v *Value) bool {
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32u x y)
// cond:
// result: (Select1 (DIVLU x y))
@@ -38387,7 +38389,7 @@ func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(types.UInt32, types.UInt32))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -38397,8 +38399,8 @@ func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod64 x y)
// cond:
// result: (Select1 (DIVQ x y))
@@ -38406,7 +38408,7 @@ func rewriteValueAMD64_OpMod64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(types.Int64, types.Int64))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -38416,8 +38418,8 @@ func rewriteValueAMD64_OpMod64_0(v *Value) bool {
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod64u x y)
// cond:
// result: (Select1 (DIVQU x y))
@@ -38425,7 +38427,7 @@ func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -38435,8 +38437,8 @@ func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
@@ -38444,11 +38446,11 @@ func rewriteValueAMD64_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
- v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -38458,8 +38460,8 @@ func rewriteValueAMD64_OpMod8_0(v *Value) bool {
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
@@ -38467,11 +38469,11 @@ func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -38481,8 +38483,8 @@ func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
func rewriteValueAMD64_OpMove_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -38508,7 +38510,7 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -38527,7 +38529,7 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -38546,7 +38548,7 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVLstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -38565,7 +38567,7 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVQstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -38584,7 +38586,7 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVOstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -38604,14 +38606,14 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -38632,14 +38634,14 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -38660,14 +38662,14 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -38688,14 +38690,14 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -38710,8 +38712,8 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [s] dst src mem)
// cond: s > 8 && s < 16
// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
@@ -38726,14 +38728,14 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = s - 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AuxInt = s - 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -38762,9 +38764,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool {
v1.AuxInt = s % 16
v1.AddArg(src)
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v2.AddArg(dst)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v3.AddArg(src)
v3.AddArg(mem)
v2.AddArg(v3)
@@ -38793,9 +38795,9 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool {
v1.AuxInt = s % 16
v1.AddArg(src)
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v2.AddArg(dst)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, TypeInt128)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
v3.AddArg(src)
v3.AddArg(mem)
v2.AddArg(v3)
@@ -38835,7 +38837,7 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool {
v.reset(OpAMD64REPMOVSQ)
v.AddArg(dst)
v.AddArg(src)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = s / 8
v.AddArg(v0)
v.AddArg(mem)
@@ -38959,16 +38961,16 @@ func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neg32F x)
// cond:
- // result: (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
v.reset(OpAMD64PXOR)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, types.Float32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
@@ -38988,16 +38990,16 @@ func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neg64F x)
// cond:
- // result: (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
v.reset(OpAMD64PXOR)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
@@ -39024,7 +39026,7 @@ func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39041,7 +39043,7 @@ func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39058,7 +39060,7 @@ func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETNEF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39075,7 +39077,7 @@ func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39092,7 +39094,7 @@ func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETNEF)
- v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39109,7 +39111,7 @@ func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39126,7 +39128,7 @@ func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39148,7 +39150,7 @@ func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
break
}
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39164,7 +39166,7 @@ func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
break
}
v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -39202,8 +39204,8 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OffPtr [off] ptr)
// cond: config.PtrSize == 8 && is32Bit(off)
// result: (ADDQconst [off] ptr)
@@ -39228,7 +39230,7 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
break
}
v.reset(OpAMD64ADDQ)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)
@@ -39318,15 +39320,15 @@ func rewriteValueAMD64_OpOrB_0(v *Value) bool {
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (PopCount16 x)
// cond:
- // result: (POPCNTL (MOVWQZX <types.UInt32> x))
+ // result: (POPCNTL (MOVWQZX <typ.UInt32> x))
for {
x := v.Args[0]
v.reset(OpAMD64POPCNTL)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -39357,15 +39359,15 @@ func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (PopCount8 x)
// cond:
- // result: (POPCNTL (MOVBQZX <types.UInt32> x))
+ // result: (POPCNTL (MOVBQZX <typ.UInt32> x))
for {
x := v.Args[0]
v.reset(OpAMD64POPCNTL)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -39411,7 +39413,7 @@ func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
@@ -39435,7 +39437,7 @@ func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
@@ -39459,7 +39461,7 @@ func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
@@ -39483,7 +39485,7 @@ func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 16
v2.AddArg(y)
v1.AddArg(v2)
@@ -39508,7 +39510,7 @@ func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
@@ -39535,7 +39537,7 @@ func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
@@ -39562,7 +39564,7 @@ func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
@@ -39589,7 +39591,7 @@ func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v3.AuxInt = 16
v3.AddArg(y)
v2.AddArg(v3)
@@ -39615,7 +39617,7 @@ func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -39639,7 +39641,7 @@ func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -39663,7 +39665,7 @@ func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -39687,7 +39689,7 @@ func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 32
v2.AddArg(y)
v1.AddArg(v2)
@@ -39712,7 +39714,7 @@ func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
@@ -39739,7 +39741,7 @@ func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
@@ -39766,7 +39768,7 @@ func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
@@ -39793,7 +39795,7 @@ func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v3.AuxInt = 32
v3.AddArg(y)
v2.AddArg(v3)
@@ -39819,7 +39821,7 @@ func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
@@ -39843,7 +39845,7 @@ func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
@@ -39867,7 +39869,7 @@ func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
@@ -39891,7 +39893,7 @@ func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v1.AddArg(v2)
@@ -39916,7 +39918,7 @@ func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v2.AddArg(v3)
@@ -39943,7 +39945,7 @@ func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v2.AddArg(v3)
@@ -39970,7 +39972,7 @@ func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v2.AddArg(v3)
@@ -39997,7 +39999,7 @@ func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v2.AddArg(v3)
@@ -40023,7 +40025,7 @@ func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
@@ -40047,7 +40049,7 @@ func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
@@ -40071,7 +40073,7 @@ func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
@@ -40095,7 +40097,7 @@ func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v2.AuxInt = 8
v2.AddArg(y)
v1.AddArg(v2)
@@ -40120,7 +40122,7 @@ func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
@@ -40147,7 +40149,7 @@ func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
@@ -40174,7 +40176,7 @@ func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
@@ -40201,7 +40203,7 @@ func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
v3.AuxInt = 8
v3.AddArg(y)
v2.AddArg(v3)
@@ -40392,14 +40394,14 @@ func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
}
func rewriteValueAMD64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpAMD64MOVSDstore)
@@ -40409,14 +40411,14 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (MOVSSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpAMD64MOVSSstore)
@@ -40426,14 +40428,14 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8
+ // cond: t.(*types.Type).Size() == 8
// result: (MOVQstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8) {
+ if !(t.(*types.Type).Size() == 8) {
break
}
v.reset(OpAMD64MOVQstore)
@@ -40443,14 +40445,14 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4
+ // cond: t.(*types.Type).Size() == 4
// result: (MOVLstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4) {
+ if !(t.(*types.Type).Size() == 4) {
break
}
v.reset(OpAMD64MOVLstore)
@@ -40460,14 +40462,14 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(OpAMD64MOVWstore)
@@ -40477,14 +40479,14 @@ func rewriteValueAMD64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(OpAMD64MOVBstore)
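The Store hunks above are the one spot where the change is more than respelling a singleton: the rule's aux is stored as interface{}, so the type assertion itself changes from the old ssa.Type interface to the concrete *types.Type. A simplified sketch of the post-CL guard, assuming v, val, and is64BitFloat come from the enclosing generated code:

	t := v.Aux // the aux slot carries the stored value's type
	if t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) { // was: t.(Type).Size()
		v.reset(OpAMD64MOVSDstore) // 8-byte float stores lower to MOVSDstore
	}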
@@ -40820,7 +40822,7 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool {
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 2)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -40839,7 +40841,7 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool {
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -40858,7 +40860,7 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool {
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -40877,7 +40879,7 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool {
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 3)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -40900,7 +40902,7 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool {
v0.AuxInt = s % 8
v0.AddArg(destptr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(destptr)
v1.AddArg(mem)
@@ -40914,8 +40916,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [16] destptr mem)
// cond:
// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
@@ -40928,7 +40930,7 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool {
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 8)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -40947,10 +40949,10 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool {
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 16)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v0.AuxInt = makeValAndOff(0, 8)
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(destptr)
v1.AddArg(mem)
@@ -40970,13 +40972,13 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool {
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 24)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v0.AuxInt = makeValAndOff(0, 16)
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v1.AuxInt = makeValAndOff(0, 8)
v1.AddArg(destptr)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
v2.AuxInt = 0
v2.AddArg(destptr)
v2.AddArg(mem)
@@ -41001,9 +41003,9 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool {
v0.AuxInt = 8
v0.AddArg(destptr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v1.AddArg(destptr)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -41023,7 +41025,7 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool {
v.reset(OpAMD64DUFFZERO)
v.AuxInt = s
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, TypeInt128)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -41041,10 +41043,10 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool {
}
v.reset(OpAMD64REPSTOSQ)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = s / 8
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)
@@ -41123,8 +41125,8 @@ func rewriteBlockAMD64(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockAMD64EQ:
// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
@@ -41152,7 +41154,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64UGE
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
b.SetControl(v0)
@@ -41183,7 +41185,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64UGE
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
b.SetControl(v0)
@@ -41214,7 +41216,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64UGE
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
b.SetControl(v0)
@@ -41245,7 +41247,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64UGE
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
b.SetControl(v0)
@@ -41265,7 +41267,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64UGE
- v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
b.SetControl(v0)
@@ -41285,7 +41287,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64UGE
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
b.SetControl(v0)
@@ -41309,7 +41311,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64UGE
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
b.SetControl(v0)
@@ -41333,7 +41335,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64UGE
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
b.SetControl(v0)
@@ -41760,7 +41762,7 @@ func rewriteBlockAMD64(b *Block) bool {
_ = v
cond := b.Control
b.Kind = BlockAMD64NE
- v0 := b.NewValue0(v.Pos, OpAMD64TESTB, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
v0.AddArg(cond)
v0.AddArg(cond)
b.SetControl(v0)
@@ -42425,7 +42427,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64ULT
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
b.SetControl(v0)
@@ -42456,7 +42458,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64ULT
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
b.SetControl(v0)
@@ -42487,7 +42489,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64ULT
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
b.SetControl(v0)
@@ -42518,7 +42520,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64ULT
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
b.SetControl(v0)
@@ -42538,7 +42540,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64ULT
- v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
b.SetControl(v0)
@@ -42558,7 +42560,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64ULT
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
b.SetControl(v0)
@@ -42582,7 +42584,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64ULT
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
b.SetControl(v0)
@@ -42606,7 +42608,7 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
b.Kind = BlockAMD64ULT
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
v0.AuxInt = log2(c)
v0.AddArg(x)
b.SetControl(v0)
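Every AMD64 hunk above is the same mechanical substitution made by the rule generator: the special SSA types become the package-level singletons types.TypeFlags, types.TypeMem, and types.TypeInt128 instead of ssa's own globals, and each rewrite function's local shorthand is renamed from types to typ, freeing the name for the imported package. A condensed sketch of the post-CL pattern, assuming b and v come from the enclosing generated function:

	typ := &b.Func.Config.Types                                // was: types := &b.Func.Config.Types
	flags := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)  // was: TypeFlags
	mem := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) // was: TypeMem
	_, _, _ = typ, flags, mem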
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 505c96a54d..0ca4e41e4e 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValueARM(v *Value) bool {
switch v.Op {
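The ARM file gets the same treatment as AMD64: one new import plus a blank-identifier reference. Because these rewrite files are generated, the guard keeps an architecture's file compiling even if none of its rules happen to mention package types. A self-contained illustration of the idiom:

	// A generated file can import a package unconditionally and silence
	// the unused-import error with a package-level blank assignment.
	package p

	import "math"

	var _ = math.MinInt8 // legal even if nothing else in p uses math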
@@ -4135,7 +4137,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v.AddArg(v0)
@@ -4170,7 +4172,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
@@ -4206,7 +4208,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
@@ -4242,7 +4244,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
@@ -4278,7 +4280,7 @@ func rewriteValueARM_OpARMCMP_0(v *Value) bool {
z := v_0.Args[1]
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v0.AddArg(z)
@@ -4319,7 +4321,7 @@ func rewriteValueARM_OpARMCMP_10(v *Value) bool {
z := v_0.Args[1]
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v0.AddArg(z)
@@ -4355,7 +4357,7 @@ func rewriteValueARM_OpARMCMP_10(v *Value) bool {
z := v_0.Args[1]
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v0.AddArg(z)
@@ -4562,7 +4564,7 @@ func rewriteValueARM_OpARMCMPshiftLL_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
v1.AuxInt = d
@@ -4604,7 +4606,7 @@ func rewriteValueARM_OpARMCMPshiftLLreg_0(v *Value) bool {
x := v.Args[1]
y := v.Args[2]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v1.AddArg(x)
@@ -4647,7 +4649,7 @@ func rewriteValueARM_OpARMCMPshiftRA_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
v1.AuxInt = d
@@ -4689,7 +4691,7 @@ func rewriteValueARM_OpARMCMPshiftRAreg_0(v *Value) bool {
x := v.Args[1]
y := v.Args[2]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
v1.AddArg(x)
@@ -4732,7 +4734,7 @@ func rewriteValueARM_OpARMCMPshiftRL_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
v1.AuxInt = d
@@ -4774,7 +4776,7 @@ func rewriteValueARM_OpARMCMPshiftRLreg_0(v *Value) bool {
x := v.Args[1]
y := v.Args[2]
v.reset(OpARMInvertFlags)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v1.AddArg(x)
@@ -13907,8 +13909,8 @@ func rewriteValueARM_OpCvt64Fto32U_0(v *Value) bool {
func rewriteValueARM_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 x y)
// cond:
// result: (Div32 (SignExt16to32 x) (SignExt16to32 y))
@@ -13916,10 +13918,10 @@ func rewriteValueARM_OpDiv16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpDiv32)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
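ARM has no 8- or 16-bit divide, so Div16 (and Div8, Div16u, Div8u below) is widened to the 32-bit form: signed operands by sign-extension, unsigned by zero-extension. The source-level equivalent, as a sketch:

    func div16(x, y int16) int16 {
        return int16(int32(x) / int32(y)) // SignExt16to32 both sides, Div32, truncate
    }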
@@ -13928,8 +13930,8 @@ func rewriteValueARM_OpDiv16_0(v *Value) bool {
func rewriteValueARM_OpDiv16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u x y)
// cond:
// result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
@@ -13937,10 +13939,10 @@ func rewriteValueARM_OpDiv16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpDiv32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -13949,51 +13951,51 @@ func rewriteValueARM_OpDiv16u_0(v *Value) bool {
func rewriteValueARM_OpDiv32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32 x y)
// cond:
- // result: (SUB (XOR <types.UInt32> (Select0 <types.UInt32> (CALLudiv (SUB <types.UInt32> (XOR x <types.UInt32> (Signmask x)) (Signmask x)) (SUB <types.UInt32> (XOR y <types.UInt32> (Signmask y)) (Signmask y)))) (Signmask (XOR <types.UInt32> x y))) (Signmask (XOR <types.UInt32> x y)))
+ // result: (SUB (XOR <typ.UInt32> (Select0 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSUB)
- v0 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpSelect0, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(types.UInt32, types.UInt32))
- v3 := b.NewValue0(v.Pos, OpARMSUB, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v5.AddArg(x)
v4.AddArg(v5)
v3.AddArg(v4)
- v6 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v6.AddArg(x)
v3.AddArg(v6)
v2.AddArg(v3)
- v7 := b.NewValue0(v.Pos, OpARMSUB, types.UInt32)
- v8 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
v8.AddArg(y)
- v9 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v9.AddArg(y)
v8.AddArg(v9)
v7.AddArg(v8)
- v10 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v10.AddArg(y)
v7.AddArg(v10)
v2.AddArg(v7)
v1.AddArg(v2)
v0.AddArg(v1)
- v11 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
- v12 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v12 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
v12.AddArg(x)
v12.AddArg(y)
v11.AddArg(v12)
v0.AddArg(v11)
v.AddArg(v0)
- v13 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
- v14 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
+ v13 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v14 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
v14.AddArg(x)
v14.AddArg(y)
v13.AddArg(v14)
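The Div32 lowering routes signed division through the unsigned divide helper (CALLudiv). Signmask(v) is the arithmetic shift v>>31 (0 for non-negative, -1 for negative), so (v XOR Signmask(v)) - Signmask(v) is |v| in two's complement, and the final XOR/SUB pair restores the quotient's sign, which is sign(x) XOR sign(y). A sketch of the arithmetic, with udiv an assumed stand-in for the quotient half of CALLudiv:

    func udiv(n, d uint32) uint32 { return n / d } // models Select0 of CALLudiv

    func div32(x, y int32) int32 {
        sx, sy := x>>31, y>>31 // Signmask: 0 or -1
        q := int32(udiv(uint32((x^sx)-sx), uint32((y^sy)-sy))) // |x| / |y|
        s := (x ^ y) >> 31 // Signmask(x^y): negative iff the signs differ
        return (q ^ s) - s
    }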
@@ -14017,17 +14019,17 @@ func rewriteValueARM_OpDiv32F_0(v *Value) bool {
func rewriteValueARM_OpDiv32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32u x y)
// cond:
- // result: (Select0 <types.UInt32> (CALLudiv x y))
+ // result: (Select0 <typ.UInt32> (CALLudiv x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v.Type = types.UInt32
- v0 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(types.UInt32, types.UInt32))
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
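CALLudiv produces quotient and remainder together, which is why its result is a two-element tuple type; Div32u consumes only the first element via Select0 (Mod32u, further down, takes Select1). A toy model of the tuple-and-select shape:

    func udivmod(n, d uint32) (uint32, uint32) { return n / d, n % d } // the tuple

    func div32u(x, y uint32) uint32 {
        q, _ := udivmod(x, y) // Select0
        return q
    }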
@@ -14050,8 +14052,8 @@ func rewriteValueARM_OpDiv64F_0(v *Value) bool {
func rewriteValueARM_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (Div32 (SignExt8to32 x) (SignExt8to32 y))
@@ -14059,10 +14061,10 @@ func rewriteValueARM_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpDiv32)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -14071,8 +14073,8 @@ func rewriteValueARM_OpDiv8_0(v *Value) bool {
func rewriteValueARM_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -14080,10 +14082,10 @@ func rewriteValueARM_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpDiv32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -14092,8 +14094,8 @@ func rewriteValueARM_OpDiv8u_0(v *Value) bool {
func rewriteValueARM_OpEq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq16 x y)
// cond:
// result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -14101,11 +14103,11 @@ func rewriteValueARM_OpEq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
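Sub-word comparisons follow the same widening discipline as the arithmetic: Eq16 zero-extends both sides and issues one 32-bit CMP, whose flags feed the Equal pseudo-op. Zero-extension is safe here even for signed values, since equality is preserved by any extension applied uniformly to both operands:

    func eq16(x, y int16) bool {
        return uint32(uint16(x)) == uint32(uint16(y)) // ZeroExt16to32 both, then CMP
    }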
@@ -14122,7 +14124,7 @@ func rewriteValueARM_OpEq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14139,7 +14141,7 @@ func rewriteValueARM_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14156,7 +14158,7 @@ func rewriteValueARM_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14166,8 +14168,8 @@ func rewriteValueARM_OpEq64F_0(v *Value) bool {
func rewriteValueARM_OpEq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq8 x y)
// cond:
// result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -14175,11 +14177,11 @@ func rewriteValueARM_OpEq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14189,17 +14191,17 @@ func rewriteValueARM_OpEq8_0(v *Value) bool {
func rewriteValueARM_OpEqB_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqB x y)
// cond:
- // result: (XORconst [1] (XOR <types.Bool> x y))
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpARMXOR, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
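EqB leans on the invariant that booleans are materialized as 0 or 1: x == y collapses to flipping the low bit of x XOR y, with no flags or branches involved. As arithmetic:

    // Valid only for operands already known to be 0 or 1.
    func eqb(x, y uint8) uint8 { return 1 ^ (x ^ y) } // XORconst [1] (XOR x y)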
@@ -14216,7 +14218,7 @@ func rewriteValueARM_OpEqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14226,8 +14228,8 @@ func rewriteValueARM_OpEqPtr_0(v *Value) bool {
func rewriteValueARM_OpGeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16 x y)
// cond:
// result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
@@ -14235,11 +14237,11 @@ func rewriteValueARM_OpGeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14249,8 +14251,8 @@ func rewriteValueARM_OpGeq16_0(v *Value) bool {
func rewriteValueARM_OpGeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16U x y)
// cond:
// result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -14258,11 +14260,11 @@ func rewriteValueARM_OpGeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14279,7 +14281,7 @@ func rewriteValueARM_OpGeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14296,7 +14298,7 @@ func rewriteValueARM_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14313,7 +14315,7 @@ func rewriteValueARM_OpGeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14330,7 +14332,7 @@ func rewriteValueARM_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14340,8 +14342,8 @@ func rewriteValueARM_OpGeq64F_0(v *Value) bool {
func rewriteValueARM_OpGeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8 x y)
// cond:
// result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
@@ -14349,11 +14351,11 @@ func rewriteValueARM_OpGeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14363,8 +14365,8 @@ func rewriteValueARM_OpGeq8_0(v *Value) bool {
func rewriteValueARM_OpGeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8U x y)
// cond:
// result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -14372,11 +14374,11 @@ func rewriteValueARM_OpGeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14395,8 +14397,8 @@ func rewriteValueARM_OpGetClosurePtr_0(v *Value) bool {
func rewriteValueARM_OpGreater16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16 x y)
// cond:
// result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
@@ -14404,11 +14406,11 @@ func rewriteValueARM_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14418,8 +14420,8 @@ func rewriteValueARM_OpGreater16_0(v *Value) bool {
func rewriteValueARM_OpGreater16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16U x y)
// cond:
// result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -14427,11 +14429,11 @@ func rewriteValueARM_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14448,7 +14450,7 @@ func rewriteValueARM_OpGreater32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14465,7 +14467,7 @@ func rewriteValueARM_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Pos, OpARMCMPF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14482,7 +14484,7 @@ func rewriteValueARM_OpGreater32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14499,7 +14501,7 @@ func rewriteValueARM_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Pos, OpARMCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14509,8 +14511,8 @@ func rewriteValueARM_OpGreater64F_0(v *Value) bool {
func rewriteValueARM_OpGreater8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8 x y)
// cond:
// result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
@@ -14518,11 +14520,11 @@ func rewriteValueARM_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14532,8 +14534,8 @@ func rewriteValueARM_OpGreater8_0(v *Value) bool {
func rewriteValueARM_OpGreater8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8U x y)
// cond:
// result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -14541,11 +14543,11 @@ func rewriteValueARM_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14603,7 +14605,7 @@ func rewriteValueARM_OpIsInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
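IsInBounds compiles 0 <= idx && idx < len into a single unsigned compare: a negative index reinterpreted as uint32 is enormous, so one LessThanU test covers both ends of the range (IsSliceInBounds below plays the same trick with LessEqualU):

    func isInBounds(idx, length int32) bool {
        return uint32(idx) < uint32(length) // one CMP, flags read as unsigned
    }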
@@ -14619,7 +14621,7 @@ func rewriteValueARM_OpIsNonNil_0(v *Value) bool {
for {
ptr := v.Args[0]
v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(ptr)
v.AddArg(v0)
@@ -14636,7 +14638,7 @@ func rewriteValueARM_OpIsSliceInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -14646,8 +14648,8 @@ func rewriteValueARM_OpIsSliceInBounds_0(v *Value) bool {
func rewriteValueARM_OpLeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16 x y)
// cond:
// result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
@@ -14655,11 +14657,11 @@ func rewriteValueARM_OpLeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14669,8 +14671,8 @@ func rewriteValueARM_OpLeq16_0(v *Value) bool {
func rewriteValueARM_OpLeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16U x y)
// cond:
// result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -14678,11 +14680,11 @@ func rewriteValueARM_OpLeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14699,7 +14701,7 @@ func rewriteValueARM_OpLeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14716,7 +14718,7 @@ func rewriteValueARM_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
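For floating point, Leq32F (and Leq64F, Less32F, Less64F below) swaps the CMPF/CMPD operands and reads the greater-than side of the flags rather than using a less-or-equal condition directly. The swap is exact even for NaNs: x <= y and y >= x are both false whenever either operand is NaN, and the reversed GreaterEqual/GreaterThan reading stays false on unordered results:

    func leq32F(x, y float32) bool { return y >= x } // CMPF y x; GreaterEqual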
@@ -14733,7 +14735,7 @@ func rewriteValueARM_OpLeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14750,7 +14752,7 @@ func rewriteValueARM_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -14760,8 +14762,8 @@ func rewriteValueARM_OpLeq64F_0(v *Value) bool {
func rewriteValueARM_OpLeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8 x y)
// cond:
// result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
@@ -14769,11 +14771,11 @@ func rewriteValueARM_OpLeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14783,8 +14785,8 @@ func rewriteValueARM_OpLeq8_0(v *Value) bool {
func rewriteValueARM_OpLeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8U x y)
// cond:
// result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -14792,11 +14794,11 @@ func rewriteValueARM_OpLeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14806,8 +14808,8 @@ func rewriteValueARM_OpLeq8U_0(v *Value) bool {
func rewriteValueARM_OpLess16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16 x y)
// cond:
// result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
@@ -14815,11 +14817,11 @@ func rewriteValueARM_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14829,8 +14831,8 @@ func rewriteValueARM_OpLess16_0(v *Value) bool {
func rewriteValueARM_OpLess16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16U x y)
// cond:
// result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -14838,11 +14840,11 @@ func rewriteValueARM_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14859,7 +14861,7 @@ func rewriteValueARM_OpLess32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14876,7 +14878,7 @@ func rewriteValueARM_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Pos, OpARMCMPF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -14893,7 +14895,7 @@ func rewriteValueARM_OpLess32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -14910,7 +14912,7 @@ func rewriteValueARM_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Pos, OpARMCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -14920,8 +14922,8 @@ func rewriteValueARM_OpLess64F_0(v *Value) bool {
func rewriteValueARM_OpLess8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8 x y)
// cond:
// result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
@@ -14929,11 +14931,11 @@ func rewriteValueARM_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -14943,8 +14945,8 @@ func rewriteValueARM_OpLess8_0(v *Value) bool {
func rewriteValueARM_OpLess8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8U x y)
// cond:
// result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -14952,11 +14954,11 @@ func rewriteValueARM_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -15089,8 +15091,8 @@ func rewriteValueARM_OpLoad_0(v *Value) bool {
func rewriteValueARM_OpLsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x16 x y)
// cond:
// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
@@ -15101,13 +15103,13 @@ func rewriteValueARM_OpLsh16x16_0(v *Value) bool {
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
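The variable-count shift lowerings need the CMPconst [256] / CMOVWHSconst [0] pair because ARM register shifts consume only the low byte of the count. Counts from the operand width up through 255 already shift every bit out in hardware, but a count of 256 or more would wrap around the byte, so the rule conditionally substitutes the 0 that Go's spec requires. A hedged sketch of the behavior being preserved:

    func lsh16x16(x, y uint16) uint16 {
        if y >= 256 {
            return 0 // CMOVWHSconst [0] path: the count's low byte would lie
        }
        return x << y // SLL path; counts 16..255 naturally produce 0
    }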
@@ -15129,7 +15131,7 @@ func rewriteValueARM_OpLsh16x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v1.AuxInt = 256
v1.AddArg(y)
v.AddArg(v1)
@@ -15176,8 +15178,8 @@ func rewriteValueARM_OpLsh16x64_0(v *Value) bool {
func rewriteValueARM_OpLsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x8 x y)
// cond:
// result: (SLL x (ZeroExt8to32 y))
@@ -15186,7 +15188,7 @@ func rewriteValueARM_OpLsh16x8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARMSLL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
return true
@@ -15195,8 +15197,8 @@ func rewriteValueARM_OpLsh16x8_0(v *Value) bool {
func rewriteValueARM_OpLsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x16 x y)
// cond:
// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
@@ -15207,13 +15209,13 @@ func rewriteValueARM_OpLsh32x16_0(v *Value) bool {
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
@@ -15235,7 +15237,7 @@ func rewriteValueARM_OpLsh32x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v1.AuxInt = 256
v1.AddArg(y)
v.AddArg(v1)
@@ -15282,8 +15284,8 @@ func rewriteValueARM_OpLsh32x64_0(v *Value) bool {
func rewriteValueARM_OpLsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x8 x y)
// cond:
// result: (SLL x (ZeroExt8to32 y))
@@ -15292,7 +15294,7 @@ func rewriteValueARM_OpLsh32x8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARMSLL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
return true
@@ -15301,8 +15303,8 @@ func rewriteValueARM_OpLsh32x8_0(v *Value) bool {
func rewriteValueARM_OpLsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x16 x y)
// cond:
// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
@@ -15313,13 +15315,13 @@ func rewriteValueARM_OpLsh8x16_0(v *Value) bool {
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
@@ -15341,7 +15343,7 @@ func rewriteValueARM_OpLsh8x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v1.AuxInt = 256
v1.AddArg(y)
v.AddArg(v1)
@@ -15388,8 +15390,8 @@ func rewriteValueARM_OpLsh8x64_0(v *Value) bool {
func rewriteValueARM_OpLsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x8 x y)
// cond:
// result: (SLL x (ZeroExt8to32 y))
@@ -15398,7 +15400,7 @@ func rewriteValueARM_OpLsh8x8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARMSLL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
return true
@@ -15407,8 +15409,8 @@ func rewriteValueARM_OpLsh8x8_0(v *Value) bool {
func rewriteValueARM_OpMod16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16 x y)
// cond:
// result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
@@ -15416,10 +15418,10 @@ func rewriteValueARM_OpMod16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -15428,8 +15430,8 @@ func rewriteValueARM_OpMod16_0(v *Value) bool {
func rewriteValueARM_OpMod16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16u x y)
// cond:
// result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
@@ -15437,10 +15439,10 @@ func rewriteValueARM_OpMod16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -15449,47 +15451,47 @@ func rewriteValueARM_OpMod16u_0(v *Value) bool {
func rewriteValueARM_OpMod32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32 x y)
// cond:
- // result: (SUB (XOR <types.UInt32> (Select1 <types.UInt32> (CALLudiv (SUB <types.UInt32> (XOR <types.UInt32> x (Signmask x)) (Signmask x)) (SUB <types.UInt32> (XOR <types.UInt32> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
+ // result: (SUB (XOR <typ.UInt32> (Select1 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSUB)
- v0 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpSelect1, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(types.UInt32, types.UInt32))
- v3 := b.NewValue0(v.Pos, OpARMSUB, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v5.AddArg(x)
v4.AddArg(v5)
v3.AddArg(v4)
- v6 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v6 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v6.AddArg(x)
v3.AddArg(v6)
v2.AddArg(v3)
- v7 := b.NewValue0(v.Pos, OpARMSUB, types.UInt32)
- v8 := b.NewValue0(v.Pos, OpARMXOR, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
v8.AddArg(y)
- v9 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v9.AddArg(y)
v8.AddArg(v9)
v7.AddArg(v8)
- v10 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v10 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v10.AddArg(y)
v7.AddArg(v10)
v2.AddArg(v7)
v1.AddArg(v2)
v0.AddArg(v1)
- v11 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v11 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v11.AddArg(x)
v0.AddArg(v11)
v.AddArg(v0)
- v12 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v12 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v12.AddArg(x)
v.AddArg(v12)
return true
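Mod32 mirrors the Div32 lowering: the same absolute values go into the helper, but Select1 takes the remainder and the sign fix uses Signmask(x) alone, because in Go the remainder takes the sign of the dividend. A sketch, with umod an assumed stand-in for the remainder half of CALLudiv:

    func umod(n, d uint32) uint32 { return n % d } // models Select1 of CALLudiv

    func mod32(x, y int32) int32 {
        sx, sy := x>>31, y>>31 // Signmask: 0 or -1
        r := int32(umod(uint32((x^sx)-sx), uint32((y^sy)-sy))) // |x| % |y|
        return (r ^ sx) - sx // reapply the dividend's sign
    }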
@@ -15498,17 +15500,17 @@ func rewriteValueARM_OpMod32_0(v *Value) bool {
func rewriteValueARM_OpMod32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32u x y)
// cond:
- // result: (Select1 <types.UInt32> (CALLudiv x y))
+ // result: (Select1 <typ.UInt32> (CALLudiv x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v.Type = types.UInt32
- v0 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(types.UInt32, types.UInt32))
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -15518,8 +15520,8 @@ func rewriteValueARM_OpMod32u_0(v *Value) bool {
func rewriteValueARM_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
@@ -15527,10 +15529,10 @@ func rewriteValueARM_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -15539,8 +15541,8 @@ func rewriteValueARM_OpMod8_0(v *Value) bool {
func rewriteValueARM_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -15548,10 +15550,10 @@ func rewriteValueARM_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -15562,8 +15564,8 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -15589,7 +15591,7 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpARMMOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -15597,7 +15599,7 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
return true
}
// match: (Move [2] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore dst (MOVHUload src mem) mem)
for {
if v.AuxInt != 2 {
@@ -15607,12 +15609,12 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpARMMOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVHUload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -15632,14 +15634,14 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = 1
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -15648,7 +15650,7 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
if v.AuxInt != 4 {
@@ -15658,12 +15660,12 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpARMMOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -15671,7 +15673,7 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
for {
if v.AuxInt != 4 {
@@ -15681,20 +15683,20 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpARMMOVHstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVHUload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARMMOVHUload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -15715,30 +15717,30 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -15761,22 +15763,22 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpARMMOVBUload, types.UInt8)
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -15786,7 +15788,7 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: s%4 == 0 && s > 4 && s <= 512 && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice
// result: (DUFFCOPY [8 * (128 - int64(s/4))] dst src mem)
for {
s := v.AuxInt
@@ -15794,7 +15796,7 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(s%4 == 0 && s > 4 && s <= 512 && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice) {
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpARMDUFFCOPY)
@@ -15805,23 +15807,23 @@ func rewriteValueARM_OpMove_0(v *Value) bool {
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: (s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0
- // result: (LoweredMove [t.(Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0
+ // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !((s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0) {
+ if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) {
break
}
v.reset(OpARMLoweredMove)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(*types.Type).Alignment()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
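Taken together, the Move rules form a ladder keyed on size and alignment: use the widest store the alignment permits, unroll copies of a few bytes, jump into the Duff's-device copy for mid-sized word-aligned moves, and fall back to the generic LoweredMove loop otherwise. A hedged summary of the dispatch (the real decisions live in the rule conditions above, not in any such function):

    func moveStrategy(size, align int64, noDuffDevice bool) string {
        switch {
        case size == 0:
            return "elide"
        case size == 1:
            return "MOVBstore"
        case size == 2 && align%2 == 0:
            return "MOVHstore"
        case size == 4 && align%4 == 0:
            return "MOVWstore"
        case size%4 == 0 && size > 4 && size <= 512 &&
            align%4 == 0 && !noDuffDevice:
            return "DUFFCOPY"
        case size <= 4:
            return "unrolled MOVHstore/MOVBstore sequence"
        default:
            return "LoweredMove"
        }
    }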
@@ -15968,8 +15970,8 @@ func rewriteValueARM_OpNeg8_0(v *Value) bool {
func rewriteValueARM_OpNeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq16 x y)
// cond:
// result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -15977,11 +15979,11 @@ func rewriteValueARM_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -15998,7 +16000,7 @@ func rewriteValueARM_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -16015,7 +16017,7 @@ func rewriteValueARM_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -16032,7 +16034,7 @@ func rewriteValueARM_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -16042,8 +16044,8 @@ func rewriteValueARM_OpNeq64F_0(v *Value) bool {
func rewriteValueARM_OpNeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq8 x y)
// cond:
// result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -16051,11 +16053,11 @@ func rewriteValueARM_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -16085,7 +16087,7 @@ func rewriteValueARM_OpNeqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -16223,8 +16225,8 @@ func rewriteValueARM_OpRound64F_0(v *Value) bool {
func rewriteValueARM_OpRsh16Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux16 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
@@ -16234,16 +16236,16 @@ func rewriteValueARM_OpRsh16Ux16_0(v *Value) bool {
v.reset(OpARMCMOVWHSconst)
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v3.AuxInt = 256
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -16253,8 +16255,8 @@ func rewriteValueARM_OpRsh16Ux16_0(v *Value) bool {
func rewriteValueARM_OpRsh16Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux32 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
@@ -16264,12 +16266,12 @@ func rewriteValueARM_OpRsh16Ux32_0(v *Value) bool {
v.reset(OpARMCMOVWHSconst)
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v2.AuxInt = 256
v2.AddArg(y)
v.AddArg(v2)
@@ -16279,11 +16281,11 @@ func rewriteValueARM_OpRsh16Ux32_0(v *Value) bool {
func rewriteValueARM_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint64(c) < 16
- // result: (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -16296,7 +16298,7 @@ func rewriteValueARM_OpRsh16Ux64_0(v *Value) bool {
}
v.reset(OpARMSRLconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
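For a constant count below 16, Rsh16Ux64 performs the zero-extension and the shift in two constant-shift instructions: park the 16-bit value in the top half of a 32-bit register, then shift right by c+16. Equivalent arithmetic:

    func rsh16Ux64(x uint16, c uint64) uint16 { // assumes c < 16, per the cond
        return uint16((uint32(x) << 16) >> (16 + c)) // SLLconst [16]; SRLconst [c+16]
    }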
@@ -16323,8 +16325,8 @@ func rewriteValueARM_OpRsh16Ux64_0(v *Value) bool {
func rewriteValueARM_OpRsh16Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux8 x y)
// cond:
// result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
@@ -16332,10 +16334,10 @@ func rewriteValueARM_OpRsh16Ux8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -16344,8 +16346,8 @@ func rewriteValueARM_OpRsh16Ux8_0(v *Value) bool {
func rewriteValueARM_OpRsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x16 x y)
// cond:
// result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
@@ -16353,15 +16355,15 @@ func rewriteValueARM_OpRsh16x16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
@@ -16371,8 +16373,8 @@ func rewriteValueARM_OpRsh16x16_0(v *Value) bool {
func rewriteValueARM_OpRsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x32 x y)
// cond:
// result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
@@ -16380,11 +16382,11 @@ func rewriteValueARM_OpRsh16x32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v1.AuxInt = 256
v1.AddArg(y)
v.AddArg(v1)
@@ -16394,11 +16396,11 @@ func rewriteValueARM_OpRsh16x32_0(v *Value) bool {
func rewriteValueARM_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
- // result: (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -16411,7 +16413,7 @@ func rewriteValueARM_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpARMSRAconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
@@ -16419,7 +16421,7 @@ func rewriteValueARM_OpRsh16x64_0(v *Value) bool {
}
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint64(c) >= 16
- // result: (SRAconst (SLLconst <types.UInt32> x [16]) [31])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -16432,7 +16434,7 @@ func rewriteValueARM_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpARMSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
@@ -16443,8 +16445,8 @@ func rewriteValueARM_OpRsh16x64_0(v *Value) bool {
func rewriteValueARM_OpRsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x8 x y)
// cond:
// result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
@@ -16452,10 +16454,10 @@ func rewriteValueARM_OpRsh16x8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -16464,8 +16466,8 @@ func rewriteValueARM_OpRsh16x8_0(v *Value) bool {
func rewriteValueARM_OpRsh32Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux16 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
@@ -16476,13 +16478,13 @@ func rewriteValueARM_OpRsh32Ux16_0(v *Value) bool {
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
@@ -16504,7 +16506,7 @@ func rewriteValueARM_OpRsh32Ux32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v1.AuxInt = 256
v1.AddArg(y)
v.AddArg(v1)
@@ -16551,8 +16553,8 @@ func rewriteValueARM_OpRsh32Ux64_0(v *Value) bool {
func rewriteValueARM_OpRsh32Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux8 x y)
// cond:
// result: (SRL x (ZeroExt8to32 y))
@@ -16561,7 +16563,7 @@ func rewriteValueARM_OpRsh32Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARMSRL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
return true
@@ -16570,8 +16572,8 @@ func rewriteValueARM_OpRsh32Ux8_0(v *Value) bool {
func rewriteValueARM_OpRsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x16 x y)
// cond:
// result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
@@ -16580,12 +16582,12 @@ func rewriteValueARM_OpRsh32x16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARMSRAcond)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v1.AuxInt = 256
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
@@ -16604,7 +16606,7 @@ func rewriteValueARM_OpRsh32x32_0(v *Value) bool {
v.reset(OpARMSRAcond)
v.AddArg(x)
v.AddArg(y)
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = 256
v0.AddArg(y)
v.AddArg(v0)
@@ -16653,8 +16655,8 @@ func rewriteValueARM_OpRsh32x64_0(v *Value) bool {
func rewriteValueARM_OpRsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x8 x y)
// cond:
// result: (SRA x (ZeroExt8to32 y))
@@ -16663,7 +16665,7 @@ func rewriteValueARM_OpRsh32x8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARMSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
return true
@@ -16672,8 +16674,8 @@ func rewriteValueARM_OpRsh32x8_0(v *Value) bool {
func rewriteValueARM_OpRsh8Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux16 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
@@ -16683,16 +16685,16 @@ func rewriteValueARM_OpRsh8Ux16_0(v *Value) bool {
v.reset(OpARMCMOVWHSconst)
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v3.AuxInt = 256
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -16702,8 +16704,8 @@ func rewriteValueARM_OpRsh8Ux16_0(v *Value) bool {
func rewriteValueARM_OpRsh8Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux32 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
@@ -16713,12 +16715,12 @@ func rewriteValueARM_OpRsh8Ux32_0(v *Value) bool {
v.reset(OpARMCMOVWHSconst)
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v2.AuxInt = 256
v2.AddArg(y)
v.AddArg(v2)
@@ -16728,11 +16730,11 @@ func rewriteValueARM_OpRsh8Ux32_0(v *Value) bool {
func rewriteValueARM_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint64(c) < 8
- // result: (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -16745,7 +16747,7 @@ func rewriteValueARM_OpRsh8Ux64_0(v *Value) bool {
}
v.reset(OpARMSRLconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -16772,8 +16774,8 @@ func rewriteValueARM_OpRsh8Ux64_0(v *Value) bool {
func rewriteValueARM_OpRsh8Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux8 x y)
// cond:
// result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -16781,10 +16783,10 @@ func rewriteValueARM_OpRsh8Ux8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -16793,8 +16795,8 @@ func rewriteValueARM_OpRsh8Ux8_0(v *Value) bool {
func rewriteValueARM_OpRsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x16 x y)
// cond:
// result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
@@ -16802,15 +16804,15 @@ func rewriteValueARM_OpRsh8x16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
@@ -16820,8 +16822,8 @@ func rewriteValueARM_OpRsh8x16_0(v *Value) bool {
func rewriteValueARM_OpRsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x32 x y)
// cond:
// result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
@@ -16829,11 +16831,11 @@ func rewriteValueARM_OpRsh8x32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v1.AuxInt = 256
v1.AddArg(y)
v.AddArg(v1)
@@ -16843,11 +16845,11 @@ func rewriteValueARM_OpRsh8x32_0(v *Value) bool {
func rewriteValueARM_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
- // result: (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -16860,7 +16862,7 @@ func rewriteValueARM_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpARMSRAconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -16868,7 +16870,7 @@ func rewriteValueARM_OpRsh8x64_0(v *Value) bool {
}
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint64(c) >= 8
- // result: (SRAconst (SLLconst <types.UInt32> x [24]) [31])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -16881,7 +16883,7 @@ func rewriteValueARM_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpARMSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -16892,8 +16894,8 @@ func rewriteValueARM_OpRsh8x64_0(v *Value) bool {
func rewriteValueARM_OpRsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x8 x y)
// cond:
// result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
@@ -16901,10 +16903,10 @@ func rewriteValueARM_OpRsh8x8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -17135,14 +17137,14 @@ func rewriteValueARM_OpStaticCall_0(v *Value) bool {
}
func rewriteValueARM_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(OpARMMOVBstore)
@@ -17152,14 +17154,14 @@ func rewriteValueARM_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(OpARMMOVHstore)
@@ -17169,14 +17171,14 @@ func rewriteValueARM_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpARMMOVWstore)
@@ -17186,14 +17188,14 @@ func rewriteValueARM_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpARMMOVFstore)
@@ -17203,14 +17205,14 @@ func rewriteValueARM_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpARMMOVDstore)
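
The Store rules above dispatch on the size of the type carried in v.Aux, which these generated rules now assert to *types.Type. A hedged sketch of the dispatch logic, with an illustrative helper name:

	// storeOpForARM models the five Store rules above: the aux type's
	// Size(), plus whether the value is a float, picks the machine store.
	func storeOpForARM(size int64, isFloat bool) string {
		switch {
		case size == 1:
			return "MOVBstore"
		case size == 2:
			return "MOVHstore"
		case size == 4 && !isFloat:
			return "MOVWstore"
		case size == 4 && isFloat:
			return "MOVFstore"
		case size == 8 && isFloat:
			return "MOVDstore"
		}
		return "" // other stores are decomposed by earlier rules
	}
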
@@ -17407,8 +17409,8 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [0] _ mem)
// cond:
// result: mem
@@ -17433,14 +17435,14 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
mem := v.Args[1]
v.reset(OpARMMOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 2 {
@@ -17449,12 +17451,12 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpARMMOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -17472,13 +17474,13 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = 1
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -17486,7 +17488,7 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 4 {
@@ -17495,19 +17497,19 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpARMMOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
for {
if v.AuxInt != 4 {
@@ -17516,19 +17518,19 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpARMMOVHstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -17547,25 +17549,25 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = 3
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v3.AuxInt = 1
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
@@ -17586,19 +17588,19 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
v.reset(OpARMMOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v1.AuxInt = 1
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -17607,44 +17609,44 @@ func rewriteValueARM_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: s%4 == 0 && s > 4 && s <= 512 && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice
// result: (DUFFZERO [4 * (128 - int64(s/4))] ptr (MOVWconst [0]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(s%4 == 0 && s > 4 && s <= 512 && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice) {
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpARMDUFFZERO)
v.AuxInt = 4 * (128 - int64(s/4))
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0
- // result: (LoweredZero [t.(Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)]) (MOVWconst [0]) mem)
+ // cond: (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0
+ // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) (MOVWconst [0]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !((s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0) {
+ if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) {
break
}
v.reset(OpARMLoweredZero)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(*types.Type).Alignment()
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
v0.AddArg(ptr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)
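
The DUFFZERO AuxInt in the rule above is an entry offset into Duff's device: the duffzero body is 128 four-byte store instructions covering 512 bytes, so zeroing s bytes skips the stores it does not need. A small model of that arithmetic (helper name illustrative):

	// duffZeroOffset models AuxInt = 4 * (128 - int64(s/4)) for
	// s%4 == 0 && 4 < s <= 512: s == 512 enters at offset 0 and runs
	// all 128 stores; smaller s enters proportionally later.
	func duffZeroOffset(s int64) int64 {
		return 4 * (128 - s/4)
	}
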
@@ -17688,16 +17690,16 @@ func rewriteValueARM_OpZeroExt8to32_0(v *Value) bool {
func rewriteValueARM_OpZeromask_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zeromask x)
// cond:
- // result: (SRAconst (RSBshiftRL <types.Int32> x x [1]) [31])
+ // result: (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31])
for {
x := v.Args[0]
v.reset(OpARMSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32)
v0.AuxInt = 1
v0.AddArg(x)
v0.AddArg(x)
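
Zeromask turns any nonzero word into all ones and leaves zero alone: (x>>1) - x is negative exactly when x != 0, and the arithmetic shift by 31 smears that sign bit across the word. A runnable model (name illustrative):

	// zeromask mirrors (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]):
	// RSBshiftRL computes (x >> 1) - x, which is negative for x != 0,
	// so the arithmetic shift yields 0xFFFFFFFF; for x == 0 it yields 0.
	func zeromask(x uint32) uint32 {
		return uint32(int32(x>>1-x) >> 31)
	}
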
@@ -17710,8 +17712,8 @@ func rewriteBlockARM(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockARMEQ:
// match: (EQ (FlagEQ) yes no)
@@ -18083,7 +18085,7 @@ func rewriteBlockARM(b *Block) bool {
_ = v
cond := b.Control
b.Kind = BlockARMNE
- v0 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(cond)
b.SetControl(v0)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index e47055809c..fa2795723d 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValueARM64(v *Value) bool {
switch v.Op {
@@ -1878,7 +1880,7 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARM64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v.AddArg(v0)
@@ -1913,7 +1915,7 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARM64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
@@ -1949,7 +1951,7 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARM64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
@@ -1985,7 +1987,7 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool {
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARM64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
@@ -2023,7 +2025,7 @@ func rewriteValueARM64_OpARM64CMPW_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARM64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags)
v0.AuxInt = int64(int32(c))
v0.AddArg(x)
v.AddArg(v0)
@@ -2319,7 +2321,7 @@ func rewriteValueARM64_OpARM64CMPshiftLL_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARM64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
v1.AuxInt = d
@@ -2361,7 +2363,7 @@ func rewriteValueARM64_OpARM64CMPshiftRA_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARM64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
v1.AuxInt = d
@@ -2403,7 +2405,7 @@ func rewriteValueARM64_OpARM64CMPshiftRL_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpARM64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
v1.AuxInt = d
@@ -10219,18 +10221,18 @@ func rewriteValueARM64_OpAvg64u_0(v *Value) bool {
func rewriteValueARM64_OpBitLen64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitLen64 x)
// cond:
- // result: (SUB (MOVDconst [64]) (CLZ <types.Int> x))
+ // result: (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
for {
x := v.Args[0]
v.reset(OpARM64SUB)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 64
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64CLZ, types.Int)
+ v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int)
v1.AddArg(x)
v.AddArg(v1)
return true
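
BitLen64 lowers to 64 minus a count-leading-zeros instruction, the same identity math/bits uses. A runnable check of the model (a standalone sketch, not compiler code):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// bitLen64 mirrors (SUB (MOVDconst [64]) (CLZ <typ.Int> x)).
	func bitLen64(x uint64) int {
		return 64 - bits.LeadingZeros64(x)
	}

	func main() {
		fmt.Println(bitLen64(0), bitLen64(1), bitLen64(255)) // 0 1 8
		fmt.Println(bitLen64(42) == bits.Len64(42))          // true
	}
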
@@ -10239,16 +10241,16 @@ func rewriteValueARM64_OpBitLen64_0(v *Value) bool {
func rewriteValueARM64_OpBitRev16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitRev16 x)
// cond:
- // result: (SRLconst [48] (RBIT <types.UInt64> x))
+ // result: (SRLconst [48] (RBIT <typ.UInt64> x))
for {
x := v.Args[0]
v.reset(OpARM64SRLconst)
v.AuxInt = 48
- v0 := b.NewValue0(v.Pos, OpARM64RBIT, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -10279,16 +10281,16 @@ func rewriteValueARM64_OpBitRev64_0(v *Value) bool {
func rewriteValueARM64_OpBitRev8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitRev8 x)
// cond:
- // result: (SRLconst [56] (RBIT <types.UInt64> x))
+ // result: (SRLconst [56] (RBIT <typ.UInt64> x))
for {
x := v.Args[0]
v.reset(OpARM64SRLconst)
v.AuxInt = 56
- v0 := b.NewValue0(v.Pos, OpARM64RBIT, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -10710,8 +10712,8 @@ func rewriteValueARM64_OpCvt64to64F_0(v *Value) bool {
func rewriteValueARM64_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 x y)
// cond:
// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
@@ -10719,10 +10721,10 @@ func rewriteValueARM64_OpDiv16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -10731,8 +10733,8 @@ func rewriteValueARM64_OpDiv16_0(v *Value) bool {
func rewriteValueARM64_OpDiv16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u x y)
// cond:
// result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
@@ -10740,10 +10742,10 @@ func rewriteValueARM64_OpDiv16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64UDIVW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -10830,8 +10832,8 @@ func rewriteValueARM64_OpDiv64u_0(v *Value) bool {
func rewriteValueARM64_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
@@ -10839,10 +10841,10 @@ func rewriteValueARM64_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -10851,8 +10853,8 @@ func rewriteValueARM64_OpDiv8_0(v *Value) bool {
func rewriteValueARM64_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -10860,10 +10862,10 @@ func rewriteValueARM64_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64UDIVW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -10872,8 +10874,8 @@ func rewriteValueARM64_OpDiv8u_0(v *Value) bool {
func rewriteValueARM64_OpEq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq16 x y)
// cond:
// result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -10881,11 +10883,11 @@ func rewriteValueARM64_OpEq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64Equal)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -10902,7 +10904,7 @@ func rewriteValueARM64_OpEq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64Equal)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -10919,7 +10921,7 @@ func rewriteValueARM64_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64Equal)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -10936,7 +10938,7 @@ func rewriteValueARM64_OpEq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64Equal)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -10953,7 +10955,7 @@ func rewriteValueARM64_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64Equal)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -10963,8 +10965,8 @@ func rewriteValueARM64_OpEq64F_0(v *Value) bool {
func rewriteValueARM64_OpEq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq8 x y)
// cond:
// result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -10972,11 +10974,11 @@ func rewriteValueARM64_OpEq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64Equal)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -10986,19 +10988,19 @@ func rewriteValueARM64_OpEq8_0(v *Value) bool {
func rewriteValueARM64_OpEqB_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqB x y)
// cond:
- // result: (XOR (MOVDconst [1]) (XOR <types.Bool> x y))
+ // result: (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64XOR)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64XOR, types.Bool)
+ v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool)
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
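
EqB works on booleans as the integers 0 and 1: x XOR y is 1 exactly when the inputs differ, and XORing with the constant 1 flips that into equality. A minimal model (name illustrative):

	// eqB mirrors (XOR (MOVDconst [1]) (XOR <typ.Bool> x y)) with
	// booleans represented as 0 or 1.
	func eqB(x, y uint64) uint64 {
		return 1 ^ (x ^ y)
	}
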
@@ -11015,7 +11017,7 @@ func rewriteValueARM64_OpEqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64Equal)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11025,8 +11027,8 @@ func rewriteValueARM64_OpEqPtr_0(v *Value) bool {
func rewriteValueARM64_OpGeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -11034,11 +11036,11 @@ func rewriteValueARM64_OpGeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11048,8 +11050,8 @@ func rewriteValueARM64_OpGeq16_0(v *Value) bool {
func rewriteValueARM64_OpGeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16U x y)
// cond:
// result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -11057,11 +11059,11 @@ func rewriteValueARM64_OpGeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11078,7 +11080,7 @@ func rewriteValueARM64_OpGeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11095,7 +11097,7 @@ func rewriteValueARM64_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11112,7 +11114,7 @@ func rewriteValueARM64_OpGeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11129,7 +11131,7 @@ func rewriteValueARM64_OpGeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11146,7 +11148,7 @@ func rewriteValueARM64_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11163,7 +11165,7 @@ func rewriteValueARM64_OpGeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11173,8 +11175,8 @@ func rewriteValueARM64_OpGeq64U_0(v *Value) bool {
func rewriteValueARM64_OpGeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -11182,11 +11184,11 @@ func rewriteValueARM64_OpGeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11196,8 +11198,8 @@ func rewriteValueARM64_OpGeq8_0(v *Value) bool {
func rewriteValueARM64_OpGeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8U x y)
// cond:
// result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -11205,11 +11207,11 @@ func rewriteValueARM64_OpGeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11228,8 +11230,8 @@ func rewriteValueARM64_OpGetClosurePtr_0(v *Value) bool {
func rewriteValueARM64_OpGreater16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -11237,11 +11239,11 @@ func rewriteValueARM64_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11251,8 +11253,8 @@ func rewriteValueARM64_OpGreater16_0(v *Value) bool {
func rewriteValueARM64_OpGreater16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16U x y)
// cond:
// result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -11260,11 +11262,11 @@ func rewriteValueARM64_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11281,7 +11283,7 @@ func rewriteValueARM64_OpGreater32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11298,7 +11300,7 @@ func rewriteValueARM64_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11315,7 +11317,7 @@ func rewriteValueARM64_OpGreater32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11332,7 +11334,7 @@ func rewriteValueARM64_OpGreater64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11349,7 +11351,7 @@ func rewriteValueARM64_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11366,7 +11368,7 @@ func rewriteValueARM64_OpGreater64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11376,8 +11378,8 @@ func rewriteValueARM64_OpGreater64U_0(v *Value) bool {
func rewriteValueARM64_OpGreater8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -11385,11 +11387,11 @@ func rewriteValueARM64_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11399,8 +11401,8 @@ func rewriteValueARM64_OpGreater8_0(v *Value) bool {
func rewriteValueARM64_OpGreater8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8U x y)
// cond:
// result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -11408,11 +11410,11 @@ func rewriteValueARM64_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11422,17 +11424,17 @@ func rewriteValueARM64_OpGreater8U_0(v *Value) bool {
func rewriteValueARM64_OpHmul32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32 x y)
// cond:
- // result: (SRAconst (MULL <types.Int64> x y) [32])
+ // result: (SRAconst (MULL <typ.Int64> x y) [32])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpARM64MULL, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11442,17 +11444,17 @@ func rewriteValueARM64_OpHmul32_0(v *Value) bool {
func rewriteValueARM64_OpHmul32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32u x y)
// cond:
- // result: (SRAconst (UMULL <types.UInt64> x y) [32])
+ // result: (SRAconst (UMULL <typ.UInt64> x y) [32])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpARM64UMULL, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
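
Hmul32 and Hmul32u both form the full 64-bit product and keep its high word: MULL for the signed case, UMULL for the unsigned, with the shift by 32 extracting bits 63..32. Runnable models (names illustrative):

	// hmul32 mirrors (SRAconst (MULL <typ.Int64> x y) [32]).
	func hmul32(x, y int32) int32 {
		return int32((int64(x) * int64(y)) >> 32)
	}

	// hmul32u mirrors (SRAconst (UMULL <typ.UInt64> x y) [32]); the
	// arithmetic shift is harmless because only the low 32 bits of the
	// shifted product are kept.
	func hmul32u(x, y uint32) uint32 {
		return uint32((uint64(x) * uint64(y)) >> 32)
	}
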
@@ -11510,7 +11512,7 @@ func rewriteValueARM64_OpIsInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpARM64LessThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -11526,7 +11528,7 @@ func rewriteValueARM64_OpIsNonNil_0(v *Value) bool {
for {
ptr := v.Args[0]
v.reset(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(ptr)
v.AddArg(v0)
@@ -11543,7 +11545,7 @@ func rewriteValueARM64_OpIsSliceInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpARM64LessEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -11553,8 +11555,8 @@ func rewriteValueARM64_OpIsSliceInBounds_0(v *Value) bool {
func rewriteValueARM64_OpLeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16 x y)
// cond:
// result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -11562,11 +11564,11 @@ func rewriteValueARM64_OpLeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11576,8 +11578,8 @@ func rewriteValueARM64_OpLeq16_0(v *Value) bool {
func rewriteValueARM64_OpLeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16U x y)
// cond:
// result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -11585,11 +11587,11 @@ func rewriteValueARM64_OpLeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11606,7 +11608,7 @@ func rewriteValueARM64_OpLeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11623,7 +11625,7 @@ func rewriteValueARM64_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -11640,7 +11642,7 @@ func rewriteValueARM64_OpLeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11657,7 +11659,7 @@ func rewriteValueARM64_OpLeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11674,7 +11676,7 @@ func rewriteValueARM64_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -11691,7 +11693,7 @@ func rewriteValueARM64_OpLeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11701,8 +11703,8 @@ func rewriteValueARM64_OpLeq64U_0(v *Value) bool {
func rewriteValueARM64_OpLeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8 x y)
// cond:
// result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -11710,11 +11712,11 @@ func rewriteValueARM64_OpLeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11724,8 +11726,8 @@ func rewriteValueARM64_OpLeq8_0(v *Value) bool {
func rewriteValueARM64_OpLeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8U x y)
// cond:
// result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -11733,11 +11735,11 @@ func rewriteValueARM64_OpLeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessEqualU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11747,8 +11749,8 @@ func rewriteValueARM64_OpLeq8U_0(v *Value) bool {
func rewriteValueARM64_OpLess16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16 x y)
// cond:
// result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -11756,11 +11758,11 @@ func rewriteValueARM64_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessThan)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11770,8 +11772,8 @@ func rewriteValueARM64_OpLess16_0(v *Value) bool {
func rewriteValueARM64_OpLess16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16U x y)
// cond:
// result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -11779,11 +11781,11 @@ func rewriteValueARM64_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11800,7 +11802,7 @@ func rewriteValueARM64_OpLess32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessThan)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11817,7 +11819,7 @@ func rewriteValueARM64_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -11834,7 +11836,7 @@ func rewriteValueARM64_OpLess32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11851,7 +11853,7 @@ func rewriteValueARM64_OpLess64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessThan)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11868,7 +11870,7 @@ func rewriteValueARM64_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -11885,7 +11887,7 @@ func rewriteValueARM64_OpLess64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -11895,8 +11897,8 @@ func rewriteValueARM64_OpLess64U_0(v *Value) bool {
func rewriteValueARM64_OpLess8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8 x y)
// cond:
// result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -11904,11 +11906,11 @@ func rewriteValueARM64_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessThan)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -11918,8 +11920,8 @@ func rewriteValueARM64_OpLess8_0(v *Value) bool {
func rewriteValueARM64_OpLess8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8U x y)
// cond:
// result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -11927,11 +11929,11 @@ func rewriteValueARM64_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64LessThanU)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -12094,8 +12096,8 @@ func rewriteValueARM64_OpLoad_0(v *Value) bool {
func rewriteValueARM64_OpLsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x16 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -12106,16 +12108,16 @@ func rewriteValueARM64_OpLsh16x16_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
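All of the shift rules that follow share this shape: Go requires x << y to be 0 once y reaches the operand width, and the raw hardware shift does not guarantee that by itself, so each rule guards the SLL/SRL with a conditional select on the shift count. Schematically (a fragment assuming package ssa context; sel, sll, zero, and cmp are hypothetical locals, and sll's own operands are elided):

// result := y < 64 ? x << y : 0, built as CSELULT(SLL, Const64 [0], CMPconst [64] y)
sel := b.NewValue0(v.Pos, OpARM64CSELULT, t)
sll := b.NewValue0(v.Pos, OpARM64SLL, t) // the unguarded shift
zero := b.NewValue0(v.Pos, OpConst64, t)
zero.AuxInt = 0
cmp := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) // flags for y compared against 64
cmp.AuxInt = 64
sel.AddArg(sll)
sel.AddArg(zero)
sel.AddArg(cmp) // select sll when unsigned y < 64, zero otherwise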
@@ -12125,8 +12127,8 @@ func rewriteValueARM64_OpLsh16x16_0(v *Value) bool {
func rewriteValueARM64_OpLsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x32 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
@@ -12137,16 +12139,16 @@ func rewriteValueARM64_OpLsh16x32_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12205,7 +12207,7 @@ func rewriteValueARM64_OpLsh16x64_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpConst64, t)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v.AddArg(v2)
@@ -12215,8 +12217,8 @@ func rewriteValueARM64_OpLsh16x64_0(v *Value) bool {
func rewriteValueARM64_OpLsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x8 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
@@ -12227,16 +12229,16 @@ func rewriteValueARM64_OpLsh16x8_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12246,8 +12248,8 @@ func rewriteValueARM64_OpLsh16x8_0(v *Value) bool {
func rewriteValueARM64_OpLsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x16 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -12258,16 +12260,16 @@ func rewriteValueARM64_OpLsh32x16_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12277,8 +12279,8 @@ func rewriteValueARM64_OpLsh32x16_0(v *Value) bool {
func rewriteValueARM64_OpLsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x32 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
@@ -12289,16 +12291,16 @@ func rewriteValueARM64_OpLsh32x32_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12357,7 +12359,7 @@ func rewriteValueARM64_OpLsh32x64_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpConst64, t)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v.AddArg(v2)
@@ -12367,8 +12369,8 @@ func rewriteValueARM64_OpLsh32x64_0(v *Value) bool {
func rewriteValueARM64_OpLsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x8 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
@@ -12379,16 +12381,16 @@ func rewriteValueARM64_OpLsh32x8_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12398,8 +12400,8 @@ func rewriteValueARM64_OpLsh32x8_0(v *Value) bool {
func rewriteValueARM64_OpLsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x16 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -12410,16 +12412,16 @@ func rewriteValueARM64_OpLsh64x16_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12429,8 +12431,8 @@ func rewriteValueARM64_OpLsh64x16_0(v *Value) bool {
func rewriteValueARM64_OpLsh64x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x32 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
@@ -12441,16 +12443,16 @@ func rewriteValueARM64_OpLsh64x32_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12509,7 +12511,7 @@ func rewriteValueARM64_OpLsh64x64_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpConst64, t)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v.AddArg(v2)
@@ -12519,8 +12521,8 @@ func rewriteValueARM64_OpLsh64x64_0(v *Value) bool {
func rewriteValueARM64_OpLsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x8 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
@@ -12531,16 +12533,16 @@ func rewriteValueARM64_OpLsh64x8_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12550,8 +12552,8 @@ func rewriteValueARM64_OpLsh64x8_0(v *Value) bool {
func rewriteValueARM64_OpLsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x16 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -12562,16 +12564,16 @@ func rewriteValueARM64_OpLsh8x16_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12581,8 +12583,8 @@ func rewriteValueARM64_OpLsh8x16_0(v *Value) bool {
func rewriteValueARM64_OpLsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x32 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
@@ -12593,16 +12595,16 @@ func rewriteValueARM64_OpLsh8x32_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12661,7 +12663,7 @@ func rewriteValueARM64_OpLsh8x64_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpConst64, t)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v.AddArg(v2)
@@ -12671,8 +12673,8 @@ func rewriteValueARM64_OpLsh8x64_0(v *Value) bool {
func rewriteValueARM64_OpLsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x8 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
@@ -12683,16 +12685,16 @@ func rewriteValueARM64_OpLsh8x8_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -12702,8 +12704,8 @@ func rewriteValueARM64_OpLsh8x8_0(v *Value) bool {
func rewriteValueARM64_OpMod16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16 x y)
// cond:
// result: (MODW (SignExt16to32 x) (SignExt16to32 y))
@@ -12711,10 +12713,10 @@ func rewriteValueARM64_OpMod16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64MODW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -12723,8 +12725,8 @@ func rewriteValueARM64_OpMod16_0(v *Value) bool {
func rewriteValueARM64_OpMod16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16u x y)
// cond:
// result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
@@ -12732,10 +12734,10 @@ func rewriteValueARM64_OpMod16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64UMODW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -12796,8 +12798,8 @@ func rewriteValueARM64_OpMod64u_0(v *Value) bool {
func rewriteValueARM64_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (MODW (SignExt8to32 x) (SignExt8to32 y))
@@ -12805,10 +12807,10 @@ func rewriteValueARM64_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64MODW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -12817,8 +12819,8 @@ func rewriteValueARM64_OpMod8_0(v *Value) bool {
func rewriteValueARM64_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -12826,10 +12828,10 @@ func rewriteValueARM64_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64UMODW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -12838,8 +12840,8 @@ func rewriteValueARM64_OpMod8u_0(v *Value) bool {
func rewriteValueARM64_OpMove_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -12865,7 +12867,7 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpARM64MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -12884,7 +12886,7 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpARM64MOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -12903,7 +12905,7 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpARM64MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -12922,7 +12924,7 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpARM64MOVDstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -12942,14 +12944,14 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
v.reset(OpARM64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
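Worth noting in the Move rules: store ops are built with the memory type, now spelled types.TypeMem, and take the memory state they consume as their final argument, so a multi-word move becomes an explicit chain in which each store's result feeds the next. The v1/v2 wiring above, reduced to its shape (a fragment assuming package ssa context; half is a hypothetical payload value standing in for the MOVHUload result):

st := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) // a store yields a new memory state
st.AddArg(dst)  // address
st.AddArg(half) // value stored (hypothetical)
st.AddArg(mem)  // memory state consumed
v.AddArg(st)    // the outer store takes st as its own memory argument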
@@ -12970,14 +12972,14 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
v.reset(OpARM64MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -12998,14 +13000,14 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
v.reset(OpARM64MOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -13026,22 +13028,22 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
v.reset(OpARM64MOVBstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -13063,14 +13065,14 @@ func rewriteValueARM64_OpMove_0(v *Value) bool {
v.reset(OpARM64MOVWstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDload, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -13085,8 +13087,8 @@ func rewriteValueARM64_OpMove_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [16] dst src mem)
// cond:
// result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
@@ -13100,14 +13102,14 @@ func rewriteValueARM64_OpMove_10(v *Value) bool {
v.reset(OpARM64MOVDstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDload, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -13128,22 +13130,22 @@ func rewriteValueARM64_OpMove_10(v *Value) bool {
v.reset(OpARM64MOVDstore)
v.AuxInt = 16
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
v0.AuxInt = 16
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDload, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpARM64MOVDload, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -13173,7 +13175,7 @@ func rewriteValueARM64_OpMove_10(v *Value) bool {
v1.AuxInt = s - s%8
v1.AddArg(src)
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMove, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
v2.AuxInt = s - s%8
v2.AddArg(dst)
v2.AddArg(src)
@@ -13369,8 +13371,8 @@ func rewriteValueARM64_OpNeg8_0(v *Value) bool {
func rewriteValueARM64_OpNeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq16 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -13378,11 +13380,11 @@ func rewriteValueARM64_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -13399,7 +13401,7 @@ func rewriteValueARM64_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -13416,7 +13418,7 @@ func rewriteValueARM64_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPS, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -13433,7 +13435,7 @@ func rewriteValueARM64_OpNeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -13450,7 +13452,7 @@ func rewriteValueARM64_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64FCMPD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -13460,8 +13462,8 @@ func rewriteValueARM64_OpNeq64F_0(v *Value) bool {
func rewriteValueARM64_OpNeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq8 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -13469,11 +13471,11 @@ func rewriteValueARM64_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -13503,7 +13505,7 @@ func rewriteValueARM64_OpNeqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64NotEqual)
- v0 := b.NewValue0(v.Pos, OpARM64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -13526,15 +13528,15 @@ func rewriteValueARM64_OpNilCheck_0(v *Value) bool {
func rewriteValueARM64_OpNot_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Not x)
// cond:
// result: (XOR (MOVDconst [1]) x)
for {
x := v.Args[0]
v.reset(OpARM64XOR)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(x)
@@ -13660,8 +13662,8 @@ func rewriteValueARM64_OpRound64F_0(v *Value) bool {
func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux16 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -13671,19 +13673,19 @@ func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -13693,8 +13695,8 @@ func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool {
func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux32 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
@@ -13704,19 +13706,19 @@ func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -13726,8 +13728,8 @@ func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool {
func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 x (MOVDconst [c]))
// cond: uint64(c) < 16
// result: (SRLconst (ZeroExt16to64 x) [c])
@@ -13743,7 +13745,7 @@ func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool {
}
v.reset(OpARM64SRLconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -13773,7 +13775,7 @@ func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
@@ -13781,7 +13783,7 @@ func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v.AddArg(v3)
@@ -13791,8 +13793,8 @@ func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool {
func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux8 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
@@ -13802,19 +13804,19 @@ func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -13824,8 +13826,8 @@ func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool {
func rewriteValueARM64_OpRsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x16 x y)
// cond:
// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
@@ -13833,19 +13835,19 @@ func rewriteValueARM64_OpRsh16x16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -13856,8 +13858,8 @@ func rewriteValueARM64_OpRsh16x16_0(v *Value) bool {
func rewriteValueARM64_OpRsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x32 x y)
// cond:
// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
@@ -13865,19 +13867,19 @@ func rewriteValueARM64_OpRsh16x32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -13888,8 +13890,8 @@ func rewriteValueARM64_OpRsh16x32_0(v *Value) bool {
func rewriteValueARM64_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 x (MOVDconst [c]))
// cond: uint64(c) < 16
// result: (SRAconst (SignExt16to64 x) [c])
@@ -13905,7 +13907,7 @@ func rewriteValueARM64_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpARM64SRAconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -13925,7 +13927,7 @@ func rewriteValueARM64_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpARM64SRAconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -13937,7 +13939,7 @@ func rewriteValueARM64_OpRsh16x64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
@@ -13945,7 +13947,7 @@ func rewriteValueARM64_OpRsh16x64_0(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v2.AuxInt = 63
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v1.AddArg(v3)
@@ -13956,8 +13958,8 @@ func rewriteValueARM64_OpRsh16x64_0(v *Value) bool {
func rewriteValueARM64_OpRsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x8 x y)
// cond:
// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
@@ -13965,19 +13967,19 @@ func rewriteValueARM64_OpRsh16x8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -13988,8 +13990,8 @@ func rewriteValueARM64_OpRsh16x8_0(v *Value) bool {
func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux16 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -13999,19 +14001,19 @@ func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -14021,8 +14023,8 @@ func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool {
func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux32 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
@@ -14032,19 +14034,19 @@ func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -14054,8 +14056,8 @@ func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool {
func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux64 x (MOVDconst [c]))
// cond: uint64(c) < 32
// result: (SRLconst (ZeroExt32to64 x) [c])
@@ -14071,7 +14073,7 @@ func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool {
}
v.reset(OpARM64SRLconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -14101,7 +14103,7 @@ func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
@@ -14109,7 +14111,7 @@ func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v.AddArg(v3)
@@ -14119,8 +14121,8 @@ func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool {
func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux8 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
@@ -14130,19 +14132,19 @@ func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -14152,8 +14154,8 @@ func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool {
func rewriteValueARM64_OpRsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x16 x y)
// cond:
// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
@@ -14161,19 +14163,19 @@ func rewriteValueARM64_OpRsh32x16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -14184,8 +14186,8 @@ func rewriteValueARM64_OpRsh32x16_0(v *Value) bool {
func rewriteValueARM64_OpRsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x32 x y)
// cond:
// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
@@ -14193,19 +14195,19 @@ func rewriteValueARM64_OpRsh32x32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -14216,8 +14218,8 @@ func rewriteValueARM64_OpRsh32x32_0(v *Value) bool {
func rewriteValueARM64_OpRsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x64 x (MOVDconst [c]))
// cond: uint64(c) < 32
// result: (SRAconst (SignExt32to64 x) [c])
@@ -14233,7 +14235,7 @@ func rewriteValueARM64_OpRsh32x64_0(v *Value) bool {
}
v.reset(OpARM64SRAconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -14253,7 +14255,7 @@ func rewriteValueARM64_OpRsh32x64_0(v *Value) bool {
}
v.reset(OpARM64SRAconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -14265,7 +14267,7 @@ func rewriteValueARM64_OpRsh32x64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
@@ -14273,7 +14275,7 @@ func rewriteValueARM64_OpRsh32x64_0(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v2.AuxInt = 63
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v1.AddArg(v3)
@@ -14284,8 +14286,8 @@ func rewriteValueARM64_OpRsh32x64_0(v *Value) bool {
func rewriteValueARM64_OpRsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x8 x y)
// cond:
// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
@@ -14293,19 +14295,19 @@ func rewriteValueARM64_OpRsh32x8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -14316,8 +14318,8 @@ func rewriteValueARM64_OpRsh32x8_0(v *Value) bool {
func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux16 <t> x y)
// cond:
// result: (CSELULT (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -14328,16 +14330,16 @@ func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -14347,8 +14349,8 @@ func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool {
func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux32 <t> x y)
// cond:
// result: (CSELULT (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
@@ -14359,16 +14361,16 @@ func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -14427,7 +14429,7 @@ func rewriteValueARM64_OpRsh64Ux64_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpConst64, t)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v.AddArg(v2)
@@ -14437,8 +14439,8 @@ func rewriteValueARM64_OpRsh64Ux64_0(v *Value) bool {
func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux8 <t> x y)
// cond:
// result: (CSELULT (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
@@ -14449,16 +14451,16 @@ func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool {
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -14468,8 +14470,8 @@ func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool {
func rewriteValueARM64_OpRsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x16 x y)
// cond:
// result: (SRA x (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
@@ -14479,15 +14481,15 @@ func rewriteValueARM64_OpRsh64x16_0(v *Value) bool {
v.reset(OpARM64SRA)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v2.AuxInt = 63
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
@@ -14498,8 +14500,8 @@ func rewriteValueARM64_OpRsh64x16_0(v *Value) bool {
func rewriteValueARM64_OpRsh64x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x32 x y)
// cond:
// result: (SRA x (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
@@ -14509,15 +14511,15 @@ func rewriteValueARM64_OpRsh64x32_0(v *Value) bool {
v.reset(OpARM64SRA)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v2.AuxInt = 63
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
@@ -14577,7 +14579,7 @@ func rewriteValueARM64_OpRsh64x64_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AuxInt = 63
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v2.AuxInt = 64
v2.AddArg(y)
v0.AddArg(v2)
@@ -14588,8 +14590,8 @@ func rewriteValueARM64_OpRsh64x64_0(v *Value) bool {
func rewriteValueARM64_OpRsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x8 x y)
// cond:
// result: (SRA x (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
@@ -14599,15 +14601,15 @@ func rewriteValueARM64_OpRsh64x8_0(v *Value) bool {
v.reset(OpARM64SRA)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(y)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v2.AuxInt = 63
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
@@ -14618,8 +14620,8 @@ func rewriteValueARM64_OpRsh64x8_0(v *Value) bool {
func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux16 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -14629,19 +14631,19 @@ func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -14651,8 +14653,8 @@ func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool {
func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux32 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
@@ -14662,19 +14664,19 @@ func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -14684,8 +14686,8 @@ func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool {
func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 x (MOVDconst [c]))
// cond: uint64(c) < 8
// result: (SRLconst (ZeroExt8to64 x) [c])
@@ -14701,7 +14703,7 @@ func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool {
}
v.reset(OpARM64SRLconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -14731,7 +14733,7 @@ func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
@@ -14739,7 +14741,7 @@ func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpConst64, t)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v.AddArg(v3)
@@ -14749,8 +14751,8 @@ func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool {
func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux8 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
@@ -14760,19 +14762,19 @@ func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpConst64, t)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -14782,8 +14784,8 @@ func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool {
func rewriteValueARM64_OpRsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x16 x y)
// cond:
// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
@@ -14791,19 +14793,19 @@ func rewriteValueARM64_OpRsh8x16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -14814,8 +14816,8 @@ func rewriteValueARM64_OpRsh8x16_0(v *Value) bool {
func rewriteValueARM64_OpRsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x32 x y)
// cond:
// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
@@ -14823,19 +14825,19 @@ func rewriteValueARM64_OpRsh8x32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -14846,8 +14848,8 @@ func rewriteValueARM64_OpRsh8x32_0(v *Value) bool {
func rewriteValueARM64_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x64 x (MOVDconst [c]))
// cond: uint64(c) < 8
// result: (SRAconst (SignExt8to64 x) [c])
@@ -14863,7 +14865,7 @@ func rewriteValueARM64_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpARM64SRAconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -14883,7 +14885,7 @@ func rewriteValueARM64_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpARM64SRAconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -14895,7 +14897,7 @@ func rewriteValueARM64_OpRsh8x64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
@@ -14903,7 +14905,7 @@ func rewriteValueARM64_OpRsh8x64_0(v *Value) bool {
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v2.AuxInt = 63
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v3.AuxInt = 64
v3.AddArg(y)
v1.AddArg(v3)
@@ -14914,8 +14916,8 @@ func rewriteValueARM64_OpRsh8x64_0(v *Value) bool {
func rewriteValueARM64_OpRsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x8 x y)
// cond:
// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
@@ -14923,19 +14925,19 @@ func rewriteValueARM64_OpRsh8x8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v3.AuxInt = 63
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -15054,14 +15056,14 @@ func rewriteValueARM64_OpStaticCall_0(v *Value) bool {
}
func rewriteValueARM64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(OpARM64MOVBstore)
@@ -15071,14 +15073,14 @@ func rewriteValueARM64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(OpARM64MOVHstore)
@@ -15088,14 +15090,14 @@ func rewriteValueARM64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpARM64MOVWstore)
@@ -15105,14 +15107,14 @@ func rewriteValueARM64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && !is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && !is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) {
break
}
v.reset(OpARM64MOVDstore)
@@ -15122,14 +15124,14 @@ func rewriteValueARM64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (FMOVSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpARM64FMOVSstore)
@@ -15139,14 +15141,14 @@ func rewriteValueARM64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpARM64FMOVDstore)
@@ -15375,8 +15377,8 @@ func rewriteValueARM64_OpXor8_0(v *Value) bool {
func rewriteValueARM64_OpZero_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [0] _ mem)
// cond:
// result: mem
@@ -15401,7 +15403,7 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
mem := v.Args[1]
v.reset(OpARM64MOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -15418,7 +15420,7 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
mem := v.Args[1]
v.reset(OpARM64MOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -15435,7 +15437,7 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
mem := v.Args[1]
v.reset(OpARM64MOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -15452,7 +15454,7 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
mem := v.Args[1]
v.reset(OpARM64MOVDstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -15470,12 +15472,12 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
v.reset(OpARM64MOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -15494,12 +15496,12 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
v.reset(OpARM64MOVBstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -15518,12 +15520,12 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
v.reset(OpARM64MOVHstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -15542,18 +15544,18 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
v.reset(OpARM64MOVBstore)
v.AuxInt = 6
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -15573,12 +15575,12 @@ func rewriteValueARM64_OpZero_0(v *Value) bool {
v.reset(OpARM64MOVWstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -15592,8 +15594,8 @@ func rewriteValueARM64_OpZero_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [16] ptr mem)
// cond:
// result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
@@ -15606,12 +15608,12 @@ func rewriteValueARM64_OpZero_10(v *Value) bool {
v.reset(OpARM64MOVDstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -15630,18 +15632,18 @@ func rewriteValueARM64_OpZero_10(v *Value) bool {
v.reset(OpARM64MOVDstore)
v.AuxInt = 16
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -15665,7 +15667,7 @@ func rewriteValueARM64_OpZero_10(v *Value) bool {
v0.AuxInt = s - s%8
v0.AddArg(ptr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZero, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
v1.AuxInt = s - s%8
v1.AddArg(ptr)
v1.AddArg(mem)
@@ -15780,8 +15782,8 @@ func rewriteBlockARM64(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockARM64EQ:
// match: (EQ (CMPconst [0] x) yes no)
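The ARM64 Store lowering earlier in this file is pure dispatch on the aux
type's size and floatness. As a self-contained plain-Go sketch of that
decision table (the Type stand-in and helper names here are hypothetical,
not the compiler's API):

	package main

	import "fmt"

	// Type is a stand-in for *types.Type; only Size matters for this rule.
	type Type struct{ size int64 }

	func (t *Type) Size() int64 { return t.size }

	// lowerStore mirrors the six (Store {t} ptr val mem) rules above:
	// integer stores pick a width, 4- and 8-byte float stores divert to
	// the FP variants.
	func lowerStore(t *Type, valIsFloat bool) string {
		switch {
		case t.Size() == 1:
			return "MOVBstore"
		case t.Size() == 2:
			return "MOVHstore"
		case t.Size() == 4 && !valIsFloat:
			return "MOVWstore"
		case t.Size() == 8 && !valIsFloat:
			return "MOVDstore"
		case t.Size() == 4 && valIsFloat:
			return "FMOVSstore"
		case t.Size() == 8 && valIsFloat:
			return "FMOVDstore"
		}
		return "no rule fires" // larger stores are lowered elsewhere
	}

	func main() {
		fmt.Println(lowerStore(&Type{size: 4}, false)) // MOVWstore
		fmt.Println(lowerStore(&Type{size: 8}, true))  // FMOVDstore
	}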
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index bed923b7e9..6c36976e26 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValueMIPS(v *Value) bool {
switch v.Op {
@@ -689,11 +691,11 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicAnd8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))))) mem)
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -702,34 +704,34 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicAnd)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSOR, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(val)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v6.AuxInt = 3
v6.AddArg(ptr)
v5.AddArg(v6)
v3.AddArg(v5)
v2.AddArg(v3)
- v7 := b.NewValue0(v.Pos, OpMIPSNORconst, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
v7.AuxInt = 0
- v8 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v9.AuxInt = 0xff
v8.AddArg(v9)
- v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v10.AuxInt = 3
- v11 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v11 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v11.AuxInt = 3
v11.AddArg(ptr)
v10.AddArg(v11)
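// The shift pyramid above computes the bit offset of the addressed byte
// within its aligned 32-bit word. In plain Go (hypothetical sketch,
// little-endian case; the BigEndian rule below flips the lane by first
// XORing ptr with 3):
//
//	word := ptr &^ 3                 // AND (MOVWconst [^3]) ptr
//	shift := (ptr & 3) << 3          // SLLconst [3] (ANDconst [3] ptr): 0, 8, 16, or 24
//	lane := uint32(val) << shift     // SLL (ZeroExt8to32 val) shift
//	keep := ^(uint32(0xff) << shift) // NORconst [0] x is ^x
//	newmask := lane | keep           // atomically: *word &= newmask,
//	                                 // ANDing val into the target byte
//	                                 // and leaving the other three intact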
@@ -742,7 +744,7 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool {
}
// match: (AtomicAnd8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))))) mem)
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -751,39 +753,39 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicAnd)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSOR, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(val)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v6.AuxInt = 3
- v7 := b.NewValue0(v.Pos, OpMIPSXORconst, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
v7.AuxInt = 3
v7.AddArg(ptr)
v6.AddArg(v7)
v5.AddArg(v6)
v3.AddArg(v5)
v2.AddArg(v3)
- v8 := b.NewValue0(v.Pos, OpMIPSNORconst, types.UInt32)
+ v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
v8.AuxInt = 0
- v9 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v10.AuxInt = 0xff
v9.AddArg(v10)
- v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v11.AuxInt = 3
- v12 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v12 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v12.AuxInt = 3
- v13 := b.NewValue0(v.Pos, OpMIPSXORconst, types.UInt32)
+ v13 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
v13.AuxInt = 3
v13.AddArg(ptr)
v12.AddArg(v13)
@@ -860,11 +862,11 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicOr8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) mem)
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -873,19 +875,19 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicOr)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v3.AddArg(val)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v4.AuxInt = 3
- v5 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v5.AuxInt = 3
v5.AddArg(ptr)
v4.AddArg(v5)
@@ -896,7 +898,7 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool {
}
// match: (AtomicOr8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) mem)
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -905,21 +907,21 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicOr)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v3.AddArg(val)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v4.AuxInt = 3
- v5 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSXORconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
v6.AuxInt = 3
v6.AddArg(ptr)
v5.AddArg(v6)
@@ -986,8 +988,8 @@ func rewriteValueMIPS_OpAvg32u_0(v *Value) bool {
func rewriteValueMIPS_OpBitLen32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitLen32 <t> x)
// cond:
// result: (SUB (MOVWconst [32]) (CLZ <t> x))
@@ -995,7 +997,7 @@ func rewriteValueMIPS_OpBitLen32_0(v *Value) bool {
t := v.Type
x := v.Args[0]
v.reset(OpMIPSSUB)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 32
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
@@ -1149,8 +1151,8 @@ func rewriteValueMIPS_OpConvert_0(v *Value) bool {
func rewriteValueMIPS_OpCtz32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Ctz32 <t> x)
// cond:
// result: (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
@@ -1158,7 +1160,7 @@ func rewriteValueMIPS_OpCtz32_0(v *Value) bool {
t := v.Type
x := v.Args[0]
v.reset(OpMIPSSUB)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 32
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
@@ -1244,8 +1246,8 @@ func rewriteValueMIPS_OpCvt64Fto32F_0(v *Value) bool {
func rewriteValueMIPS_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 x y)
// cond:
// result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
@@ -1253,11 +1255,11 @@ func rewriteValueMIPS_OpDiv16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
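// MIPS DIV produces a two-result pair, so the generated value now carries
// a real tuple type: types.NewTuple(typ.Int32, typ.Int32) replaces the
// old ssa-local MakeTuple. Select1 then projects the quotient out of the
// pair (the Hmul rules further down use Select0 for the high half).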
@@ -1267,8 +1269,8 @@ func rewriteValueMIPS_OpDiv16_0(v *Value) bool {
func rewriteValueMIPS_OpDiv16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u x y)
// cond:
// result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -1276,11 +1278,11 @@ func rewriteValueMIPS_OpDiv16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1290,8 +1292,8 @@ func rewriteValueMIPS_OpDiv16u_0(v *Value) bool {
func rewriteValueMIPS_OpDiv32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32 x y)
// cond:
// result: (Select1 (DIV x y))
@@ -1299,7 +1301,7 @@ func rewriteValueMIPS_OpDiv32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1322,8 +1324,8 @@ func rewriteValueMIPS_OpDiv32F_0(v *Value) bool {
func rewriteValueMIPS_OpDiv32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32u x y)
// cond:
// result: (Select1 (DIVU x y))
@@ -1331,7 +1333,7 @@ func rewriteValueMIPS_OpDiv32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1354,8 +1356,8 @@ func rewriteValueMIPS_OpDiv64F_0(v *Value) bool {
func rewriteValueMIPS_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
@@ -1363,11 +1365,11 @@ func rewriteValueMIPS_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1377,8 +1379,8 @@ func rewriteValueMIPS_OpDiv8_0(v *Value) bool {
func rewriteValueMIPS_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -1386,11 +1388,11 @@ func rewriteValueMIPS_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1400,8 +1402,8 @@ func rewriteValueMIPS_OpDiv8u_0(v *Value) bool {
func rewriteValueMIPS_OpEq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq16 x y)
// cond:
// result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -1410,11 +1412,11 @@ func rewriteValueMIPS_OpEq16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1424,8 +1426,8 @@ func rewriteValueMIPS_OpEq16_0(v *Value) bool {
func rewriteValueMIPS_OpEq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq32 x y)
// cond:
// result: (SGTUconst [1] (XOR x y))
@@ -1434,7 +1436,7 @@ func rewriteValueMIPS_OpEq32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1451,7 +1453,7 @@ func rewriteValueMIPS_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1468,7 +1470,7 @@ func rewriteValueMIPS_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1478,8 +1480,8 @@ func rewriteValueMIPS_OpEq64F_0(v *Value) bool {
func rewriteValueMIPS_OpEq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq8 x y)
// cond:
// result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -1488,11 +1490,11 @@ func rewriteValueMIPS_OpEq8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1502,17 +1504,17 @@ func rewriteValueMIPS_OpEq8_0(v *Value) bool {
func rewriteValueMIPS_OpEqB_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqB x y)
// cond:
- // result: (XORconst [1] (XOR <types.Bool> x y))
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
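// Booleans reach SSA materialized as 0 or 1, so EqB reduces to bit math:
// for x, y in {0, 1}, x == y is exactly (x ^ y) ^ 1, which is what the
// XORconst [1] wrapped around the XOR <typ.Bool> computes.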
@@ -1522,8 +1524,8 @@ func rewriteValueMIPS_OpEqB_0(v *Value) bool {
func rewriteValueMIPS_OpEqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqPtr x y)
// cond:
// result: (SGTUconst [1] (XOR x y))
@@ -1532,7 +1534,7 @@ func rewriteValueMIPS_OpEqPtr_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1542,8 +1544,8 @@ func rewriteValueMIPS_OpEqPtr_0(v *Value) bool {
func rewriteValueMIPS_OpGeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt16to32 y) (SignExt16to32 x)))
@@ -1552,11 +1554,11 @@ func rewriteValueMIPS_OpGeq16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1566,8 +1568,8 @@ func rewriteValueMIPS_OpGeq16_0(v *Value) bool {
func rewriteValueMIPS_OpGeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)))
@@ -1576,11 +1578,11 @@ func rewriteValueMIPS_OpGeq16U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1590,8 +1592,8 @@ func rewriteValueMIPS_OpGeq16U_0(v *Value) bool {
func rewriteValueMIPS_OpGeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32 x y)
// cond:
// result: (XORconst [1] (SGT y x))
@@ -1600,7 +1602,7 @@ func rewriteValueMIPS_OpGeq32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -1617,7 +1619,7 @@ func rewriteValueMIPS_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1627,8 +1629,8 @@ func rewriteValueMIPS_OpGeq32F_0(v *Value) bool {
func rewriteValueMIPS_OpGeq32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32U x y)
// cond:
// result: (XORconst [1] (SGTU y x))
@@ -1637,7 +1639,7 @@ func rewriteValueMIPS_OpGeq32U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -1654,7 +1656,7 @@ func rewriteValueMIPS_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1664,8 +1666,8 @@ func rewriteValueMIPS_OpGeq64F_0(v *Value) bool {
func rewriteValueMIPS_OpGeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt8to32 y) (SignExt8to32 x)))
@@ -1674,11 +1676,11 @@ func rewriteValueMIPS_OpGeq8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1688,8 +1690,8 @@ func rewriteValueMIPS_OpGeq8_0(v *Value) bool {
func rewriteValueMIPS_OpGeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)))
@@ -1698,11 +1700,11 @@ func rewriteValueMIPS_OpGeq8U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1721,8 +1723,8 @@ func rewriteValueMIPS_OpGetClosurePtr_0(v *Value) bool {
func rewriteValueMIPS_OpGreater16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16 x y)
// cond:
// result: (SGT (SignExt16to32 x) (SignExt16to32 y))
@@ -1730,10 +1732,10 @@ func rewriteValueMIPS_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1742,8 +1744,8 @@ func rewriteValueMIPS_OpGreater16_0(v *Value) bool {
func rewriteValueMIPS_OpGreater16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16U x y)
// cond:
// result: (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))
@@ -1751,10 +1753,10 @@ func rewriteValueMIPS_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1783,7 +1785,7 @@ func rewriteValueMIPS_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1813,7 +1815,7 @@ func rewriteValueMIPS_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1823,8 +1825,8 @@ func rewriteValueMIPS_OpGreater64F_0(v *Value) bool {
func rewriteValueMIPS_OpGreater8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8 x y)
// cond:
// result: (SGT (SignExt8to32 x) (SignExt8to32 y))
@@ -1832,10 +1834,10 @@ func rewriteValueMIPS_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1844,8 +1846,8 @@ func rewriteValueMIPS_OpGreater8_0(v *Value) bool {
func rewriteValueMIPS_OpGreater8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8U x y)
// cond:
// result: (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -1853,10 +1855,10 @@ func rewriteValueMIPS_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1865,8 +1867,8 @@ func rewriteValueMIPS_OpGreater8U_0(v *Value) bool {
func rewriteValueMIPS_OpHmul32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32 x y)
// cond:
// result: (Select0 (MULT x y))
@@ -1874,7 +1876,7 @@ func rewriteValueMIPS_OpHmul32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSMULT, MakeTuple(types.Int32, types.Int32))
+ v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1884,8 +1886,8 @@ func rewriteValueMIPS_OpHmul32_0(v *Value) bool {
func rewriteValueMIPS_OpHmul32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32u x y)
// cond:
// result: (Select0 (MULTU x y))
@@ -1893,7 +1895,7 @@ func rewriteValueMIPS_OpHmul32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(types.UInt32, types.UInt32))
+ v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1931,8 +1933,8 @@ func rewriteValueMIPS_OpIsInBounds_0(v *Value) bool {
func rewriteValueMIPS_OpIsNonNil_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsNonNil ptr)
// cond:
// result: (SGTU ptr (MOVWconst [0]))
@@ -1940,7 +1942,7 @@ func rewriteValueMIPS_OpIsNonNil_0(v *Value) bool {
ptr := v.Args[0]
v.reset(OpMIPSSGTU)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
return true
@@ -1949,8 +1951,8 @@ func rewriteValueMIPS_OpIsNonNil_0(v *Value) bool {
func rewriteValueMIPS_OpIsSliceInBounds_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsSliceInBounds idx len)
// cond:
// result: (XORconst [1] (SGTU idx len))
@@ -1959,7 +1961,7 @@ func rewriteValueMIPS_OpIsSliceInBounds_0(v *Value) bool {
len := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -1969,8 +1971,8 @@ func rewriteValueMIPS_OpIsSliceInBounds_0(v *Value) bool {
func rewriteValueMIPS_OpLeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
@@ -1979,11 +1981,11 @@ func rewriteValueMIPS_OpLeq16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1993,8 +1995,8 @@ func rewriteValueMIPS_OpLeq16_0(v *Value) bool {
func rewriteValueMIPS_OpLeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -2003,11 +2005,11 @@ func rewriteValueMIPS_OpLeq16U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2017,8 +2019,8 @@ func rewriteValueMIPS_OpLeq16U_0(v *Value) bool {
func rewriteValueMIPS_OpLeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32 x y)
// cond:
// result: (XORconst [1] (SGT x y))
@@ -2027,7 +2029,7 @@ func rewriteValueMIPS_OpLeq32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2044,7 +2046,7 @@ func rewriteValueMIPS_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2054,8 +2056,8 @@ func rewriteValueMIPS_OpLeq32F_0(v *Value) bool {
func rewriteValueMIPS_OpLeq32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32U x y)
// cond:
// result: (XORconst [1] (SGTU x y))
@@ -2064,7 +2066,7 @@ func rewriteValueMIPS_OpLeq32U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2081,7 +2083,7 @@ func rewriteValueMIPS_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2091,8 +2093,8 @@ func rewriteValueMIPS_OpLeq64F_0(v *Value) bool {
func rewriteValueMIPS_OpLeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
@@ -2101,11 +2103,11 @@ func rewriteValueMIPS_OpLeq8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2115,8 +2117,8 @@ func rewriteValueMIPS_OpLeq8_0(v *Value) bool {
func rewriteValueMIPS_OpLeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -2125,11 +2127,11 @@ func rewriteValueMIPS_OpLeq8U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2139,8 +2141,8 @@ func rewriteValueMIPS_OpLeq8U_0(v *Value) bool {
func rewriteValueMIPS_OpLess16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16 x y)
// cond:
// result: (SGT (SignExt16to32 y) (SignExt16to32 x))
@@ -2148,10 +2150,10 @@ func rewriteValueMIPS_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2160,8 +2162,8 @@ func rewriteValueMIPS_OpLess16_0(v *Value) bool {
func rewriteValueMIPS_OpLess16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16U x y)
// cond:
// result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
@@ -2169,10 +2171,10 @@ func rewriteValueMIPS_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2201,7 +2203,7 @@ func rewriteValueMIPS_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2231,7 +2233,7 @@ func rewriteValueMIPS_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2241,8 +2243,8 @@ func rewriteValueMIPS_OpLess64F_0(v *Value) bool {
func rewriteValueMIPS_OpLess8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8 x y)
// cond:
// result: (SGT (SignExt8to32 y) (SignExt8to32 x))
@@ -2250,10 +2252,10 @@ func rewriteValueMIPS_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2262,8 +2264,8 @@ func rewriteValueMIPS_OpLess8_0(v *Value) bool {
func rewriteValueMIPS_OpLess8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8U x y)
// cond:
// result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
@@ -2271,10 +2273,10 @@ func rewriteValueMIPS_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2406,8 +2408,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool {
func rewriteValueMIPS_OpLsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -2418,16 +2420,16 @@ func rewriteValueMIPS_OpLsh16x16_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
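// On 32-bit MIPS the out-of-range guard is SGTUconst [32] rather than a
// 64-bit compare: it yields 1 when the zero-extended count is below 32
// and 0 otherwise, and CMOVZ then picks the MOVWconst [0] arm for the
// out-of-range case. Sketching the intent in plain Go:
//
//	if uint32(y) >= 32 {
//		result = 0 // shift count too large: Go semantics require 0
//	} else {
//		result = x << y // the SLL <t> x y arm
//	}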
@@ -2437,8 +2439,8 @@ func rewriteValueMIPS_OpLsh16x16_0(v *Value) bool {
func rewriteValueMIPS_OpLsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -2451,10 +2453,10 @@ func rewriteValueMIPS_OpLsh16x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
@@ -2501,8 +2503,8 @@ func rewriteValueMIPS_OpLsh16x64_0(v *Value) bool {
func rewriteValueMIPS_OpLsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -2513,16 +2515,16 @@ func rewriteValueMIPS_OpLsh16x8_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2532,8 +2534,8 @@ func rewriteValueMIPS_OpLsh16x8_0(v *Value) bool {
func rewriteValueMIPS_OpLsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -2544,16 +2546,16 @@ func rewriteValueMIPS_OpLsh32x16_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2563,8 +2565,8 @@ func rewriteValueMIPS_OpLsh32x16_0(v *Value) bool {
func rewriteValueMIPS_OpLsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -2577,10 +2579,10 @@ func rewriteValueMIPS_OpLsh32x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
@@ -2627,8 +2629,8 @@ func rewriteValueMIPS_OpLsh32x64_0(v *Value) bool {
func rewriteValueMIPS_OpLsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -2639,16 +2641,16 @@ func rewriteValueMIPS_OpLsh32x8_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2658,8 +2660,8 @@ func rewriteValueMIPS_OpLsh32x8_0(v *Value) bool {
func rewriteValueMIPS_OpLsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -2670,16 +2672,16 @@ func rewriteValueMIPS_OpLsh8x16_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2689,8 +2691,8 @@ func rewriteValueMIPS_OpLsh8x16_0(v *Value) bool {
func rewriteValueMIPS_OpLsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -2703,10 +2705,10 @@ func rewriteValueMIPS_OpLsh8x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
@@ -2753,8 +2755,8 @@ func rewriteValueMIPS_OpLsh8x64_0(v *Value) bool {
func rewriteValueMIPS_OpLsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -2765,16 +2767,16 @@ func rewriteValueMIPS_OpLsh8x8_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
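
Note (illustrative): the Lsh rules above all share one shape, and the CMOVZ semantics make the shift clamp explicit. A comment-only sketch of the reading, assuming CMOVZ(a, b, c) selects b when c is zero and a otherwise:

	// SGTUconst [32] y  ==  1 if y < 32 (unsigned), 0 if y >= 32
	// CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y)
	//   y <  32  ->  x << y
	//   y >= 32  ->  0, matching Go's shift semantics on 32-bit MIPS
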
@@ -6044,8 +6046,8 @@ func rewriteValueMIPS_OpMIPSXORconst_0(v *Value) bool {
func rewriteValueMIPS_OpMod16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16 x y)
// cond:
// result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
@@ -6053,11 +6055,11 @@ func rewriteValueMIPS_OpMod16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6067,8 +6069,8 @@ func rewriteValueMIPS_OpMod16_0(v *Value) bool {
func rewriteValueMIPS_OpMod16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16u x y)
// cond:
// result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -6076,11 +6078,11 @@ func rewriteValueMIPS_OpMod16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6090,8 +6092,8 @@ func rewriteValueMIPS_OpMod16u_0(v *Value) bool {
func rewriteValueMIPS_OpMod32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32 x y)
// cond:
// result: (Select0 (DIV x y))
@@ -6099,7 +6101,7 @@ func rewriteValueMIPS_OpMod32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
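
Note (illustrative): the Mod rules show the second mechanical substitution — tuple types come from the types package constructor rather than the old package-local MakeTuple helper. A sketch with names from the hunks above (tt is a local introduced here for illustration):

	// MIPS DIV yields a two-element tuple; the Mod* rules take Select0,
	// and the Div* rules elsewhere in this file take Select1.
	tt := types.NewTuple(typ.Int32, typ.Int32)
	v0 := b.NewValue0(v.Pos, OpMIPSDIV, tt)
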
@@ -6109,8 +6111,8 @@ func rewriteValueMIPS_OpMod32_0(v *Value) bool {
func rewriteValueMIPS_OpMod32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32u x y)
// cond:
// result: (Select0 (DIVU x y))
@@ -6118,7 +6120,7 @@ func rewriteValueMIPS_OpMod32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6128,8 +6130,8 @@ func rewriteValueMIPS_OpMod32u_0(v *Value) bool {
func rewriteValueMIPS_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
@@ -6137,11 +6139,11 @@ func rewriteValueMIPS_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6151,8 +6153,8 @@ func rewriteValueMIPS_OpMod8_0(v *Value) bool {
func rewriteValueMIPS_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -6160,11 +6162,11 @@ func rewriteValueMIPS_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6174,8 +6176,8 @@ func rewriteValueMIPS_OpMod8u_0(v *Value) bool {
func rewriteValueMIPS_OpMove_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -6201,7 +6203,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpMIPSMOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6209,7 +6211,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [2] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore dst (MOVHUload src mem) mem)
for {
if v.AuxInt != 2 {
@@ -6219,12 +6221,12 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6244,14 +6246,14 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 1
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6260,7 +6262,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
if v.AuxInt != 4 {
@@ -6270,12 +6272,12 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6283,7 +6285,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
for {
if v.AuxInt != 4 {
@@ -6293,20 +6295,20 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6327,30 +6329,30 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -6373,22 +6375,22 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6398,7 +6400,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
if v.AuxInt != 8 {
@@ -6408,20 +6410,20 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6430,7 +6432,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
for {
if v.AuxInt != 8 {
@@ -6440,36 +6442,36 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v3.AuxInt = 2
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v4.AuxInt = 2
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -6486,10 +6488,10 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [6] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
for {
if v.AuxInt != 6 {
@@ -6499,28 +6501,28 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6530,7 +6532,7 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
return true
}
// match: (Move [12] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
for {
if v.AuxInt != 12 {
@@ -6540,28 +6542,28 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6571,7 +6573,7 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
return true
}
// match: (Move [16] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))))
for {
if v.AuxInt != 16 {
@@ -6581,36 +6583,36 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 12
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v0.AuxInt = 12
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v3.AuxInt = 4
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v4.AuxInt = 4
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -6621,23 +6623,23 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: (s > 16 || t.(Type).Alignment()%4 != 0)
- // result: (LoweredMove [t.(Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 16 || t.(*types.Type).Alignment()%4 != 0)
+ // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(s > 16 || t.(Type).Alignment()%4 != 0) {
+ if !(s > 16 || t.(*types.Type).Alignment()%4 != 0) {
break
}
v.reset(OpMIPSLoweredMove)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(*types.Type).Alignment()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
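
Note (illustrative): v.Aux is an interface{} value, so the generated conditions assert it to the concrete *types.Type where they used to assert the ssa.Type interface. Condensed from the Move rules above (s, t, dst, src, mem as in the hunks):

	t := v.Aux // interface{} holding the moved element's type
	if s > 16 || t.(*types.Type).Alignment()%4 != 0 {
		// too big or misaligned: emit LoweredMove instead of unrolled stores
	}
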
@@ -6781,8 +6783,8 @@ func rewriteValueMIPS_OpNeg8_0(v *Value) bool {
func rewriteValueMIPS_OpNeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq16 x y)
// cond:
// result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
@@ -6790,15 +6792,15 @@ func rewriteValueMIPS_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
return true
@@ -6807,8 +6809,8 @@ func rewriteValueMIPS_OpNeq16_0(v *Value) bool {
func rewriteValueMIPS_OpNeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq32 x y)
// cond:
// result: (SGTU (XOR x y) (MOVWconst [0]))
@@ -6816,11 +6818,11 @@ func rewriteValueMIPS_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
return true
@@ -6836,7 +6838,7 @@ func rewriteValueMIPS_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagFalse)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6853,7 +6855,7 @@ func rewriteValueMIPS_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagFalse)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6863,8 +6865,8 @@ func rewriteValueMIPS_OpNeq64F_0(v *Value) bool {
func rewriteValueMIPS_OpNeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq8 x y)
// cond:
// result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
@@ -6872,15 +6874,15 @@ func rewriteValueMIPS_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
return true
@@ -6902,8 +6904,8 @@ func rewriteValueMIPS_OpNeqB_0(v *Value) bool {
func rewriteValueMIPS_OpNeqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (NeqPtr x y)
// cond:
// result: (SGTU (XOR x y) (MOVWconst [0]))
@@ -6911,11 +6913,11 @@ func rewriteValueMIPS_OpNeqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
return true
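
Note (illustrative): the SSA-only pseudo-types follow the same move — the package-local TypeMem and TypeFlags singletons above become types.TypeMem and types.TypeFlags. Two lines condensed from the surrounding hunks:

	v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) // comparison produces flags, not a Go value
	v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) // a store's result is the new memory state
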
@@ -7052,8 +7054,8 @@ func rewriteValueMIPS_OpRound64F_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -7063,19 +7065,19 @@ func rewriteValueMIPS_OpRsh16Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7085,8 +7087,8 @@ func rewriteValueMIPS_OpRsh16Ux16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -7096,15 +7098,15 @@ func rewriteValueMIPS_OpRsh16Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
v3.AddArg(y)
v.AddArg(v3)
@@ -7114,11 +7116,11 @@ func rewriteValueMIPS_OpRsh16Ux32_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7131,7 +7133,7 @@ func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool {
}
v.reset(OpMIPSSRLconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
@@ -7158,8 +7160,8 @@ func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -7169,19 +7171,19 @@ func rewriteValueMIPS_OpRsh16Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7191,28 +7193,28 @@ func rewriteValueMIPS_OpRsh16Ux8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x16 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -7223,24 +7225,24 @@ func rewriteValueMIPS_OpRsh16x16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x32 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = -1
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
v3.AddArg(y)
v1.AddArg(v3)
@@ -7251,11 +7253,11 @@ func rewriteValueMIPS_OpRsh16x32_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7268,7 +7270,7 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpMIPSSRAconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
@@ -7276,7 +7278,7 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
}
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint32(c) >= 16
- // result: (SRAconst (SLLconst <types.UInt32> x [16]) [31])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7289,7 +7291,7 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpMIPSSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
@@ -7300,28 +7302,28 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x8 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -7332,8 +7334,8 @@ func rewriteValueMIPS_OpRsh16x8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -7344,16 +7346,16 @@ func rewriteValueMIPS_OpRsh32Ux16_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -7363,8 +7365,8 @@ func rewriteValueMIPS_OpRsh32Ux16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -7377,10 +7379,10 @@ func rewriteValueMIPS_OpRsh32Ux32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
@@ -7427,8 +7429,8 @@ func rewriteValueMIPS_OpRsh32Ux64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -7439,16 +7441,16 @@ func rewriteValueMIPS_OpRsh32Ux8_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -7458,26 +7460,26 @@ func rewriteValueMIPS_OpRsh32Ux8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x16 x y)
// cond:
- // result: (SRA x ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = -1
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
@@ -7488,22 +7490,22 @@ func rewriteValueMIPS_OpRsh32x16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x32 x y)
// cond:
- // result: (SRA x ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = -1
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v0.AddArg(v2)
@@ -7553,26 +7555,26 @@ func rewriteValueMIPS_OpRsh32x64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x8 x y)
// cond:
- // result: (SRA x ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = -1
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
@@ -7583,8 +7585,8 @@ func rewriteValueMIPS_OpRsh32x8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -7594,19 +7596,19 @@ func rewriteValueMIPS_OpRsh8Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7616,8 +7618,8 @@ func rewriteValueMIPS_OpRsh8Ux16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -7627,15 +7629,15 @@ func rewriteValueMIPS_OpRsh8Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
v3.AddArg(y)
v.AddArg(v3)
@@ -7645,11 +7647,11 @@ func rewriteValueMIPS_OpRsh8Ux32_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7662,7 +7664,7 @@ func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool {
}
v.reset(OpMIPSSRLconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -7689,8 +7691,8 @@ func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -7700,19 +7702,19 @@ func rewriteValueMIPS_OpRsh8Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7722,28 +7724,28 @@ func rewriteValueMIPS_OpRsh8Ux8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x16 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -7754,24 +7756,24 @@ func rewriteValueMIPS_OpRsh8x16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x32 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = -1
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
v3.AddArg(y)
v1.AddArg(v3)
@@ -7782,11 +7784,11 @@ func rewriteValueMIPS_OpRsh8x32_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7799,7 +7801,7 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpMIPSSRAconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -7807,7 +7809,7 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
}
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint32(c) >= 8
- // result: (SRAconst (SLLconst <types.UInt32> x [24]) [31])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7820,7 +7822,7 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpMIPSSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -7831,28 +7833,28 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x8 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -7863,8 +7865,8 @@ func rewriteValueMIPS_OpRsh8x8_0(v *Value) bool {
func rewriteValueMIPS_OpSelect0_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Select0 (Add32carry <t> x y))
// cond:
// result: (ADD <t.FieldType(0)> x y)
@@ -7996,7 +7998,7 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool {
v0.AuxInt = -1
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(x)
@@ -8023,7 +8025,7 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool {
v0.AuxInt = -1
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(x)
@@ -8169,11 +8171,11 @@ func rewriteValueMIPS_OpSelect0_10(v *Value) bool {
func rewriteValueMIPS_OpSelect1_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Select1 (Add32carry <t> x y))
// cond:
- // result: (SGTU <types.Bool> x (ADD <t.FieldType(0)> x y))
+ // result: (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
for {
v_0 := v.Args[0]
if v_0.Op != OpAdd32carry {
@@ -8183,7 +8185,7 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool {
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpMIPSSGTU)
- v.Type = types.Bool
+ v.Type = typ.Bool
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0))
v0.AddArg(x)
@@ -8193,7 +8195,7 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool {
}
// match: (Select1 (Sub32carry <t> x y))
// cond:
- // result: (SGTU <types.Bool> (SUB <t.FieldType(0)> x y) x)
+ // result: (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
for {
v_0 := v.Args[0]
if v_0.Op != OpSub32carry {
@@ -8203,7 +8205,7 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool {
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpMIPSSGTU)
- v.Type = types.Bool
+ v.Type = typ.Bool
v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0))
v0.AddArg(x)
v0.AddArg(y)
@@ -8560,14 +8562,14 @@ func rewriteValueMIPS_OpStaticCall_0(v *Value) bool {
}
func rewriteValueMIPS_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(OpMIPSMOVBstore)
@@ -8577,14 +8579,14 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(OpMIPSMOVHstore)
@@ -8594,14 +8596,14 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVWstore)
@@ -8611,14 +8613,14 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVFstore)
@@ -8628,14 +8630,14 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVDstore)
@@ -8822,8 +8824,8 @@ func rewriteValueMIPS_OpXor8_0(v *Value) bool {
func rewriteValueMIPS_OpZero_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [0] _ mem)
// cond:
// result: mem
@@ -8848,14 +8850,14 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
mem := v.Args[1]
v.reset(OpMIPSMOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 2 {
@@ -8864,12 +8866,12 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -8887,13 +8889,13 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 1
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
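
MOVBstore and the other store ops produce a memory value, and the inner stores in chains like the one above are now typed with the shared types.TypeMem singleton instead of a package-local TypeMem. A sketch of the singleton idea under an assumed (not actual) struct layout: one shared instance per special type, so checks reduce to pointer comparisons:

package main

import "fmt"

// Assumed, simplified shape: special SSA-only types (memory, flags,
// void, ...) are shared package-level globals rather than fresh values.
type Type struct{ kind string }

var TypeMem = &Type{kind: "mem"} // one instance shared by every store

func isMemory(t *Type) bool { return t == TypeMem } // pointer identity

func main() {
	fmt.Println(isMemory(TypeMem))            // true
	fmt.Println(isMemory(&Type{kind: "mem"})) // false: not the singleton
}
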
@@ -8901,7 +8903,7 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 4 {
@@ -8910,19 +8912,19 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
for {
if v.AuxInt != 4 {
@@ -8931,19 +8933,19 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -8962,25 +8964,25 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 3
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v3.AuxInt = 1
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
@@ -9001,19 +9003,19 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 1
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9022,7 +9024,7 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [6] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [4] ptr (MOVWconst [0]) (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)))
for {
if v.AuxInt != 6 {
@@ -9031,25 +9033,25 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9058,7 +9060,7 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))
for {
if v.AuxInt != 8 {
@@ -9067,19 +9069,19 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -9093,10 +9095,10 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [12] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))
for {
if v.AuxInt != 12 {
@@ -9105,25 +9107,25 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9132,7 +9134,7 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [16] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))))
for {
if v.AuxInt != 16 {
@@ -9141,31 +9143,31 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 12
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v3.AuxInt = 4
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
@@ -9175,21 +9177,21 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 16 || t.(Type).Alignment()%4 != 0)
- // result: (LoweredZero [t.(Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 16 || t.(*types.Type).Alignment()%4 != 0)
+ // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(s > 16 || t.(Type).Alignment()%4 != 0) {
+ if !(s > 16 || t.(*types.Type).Alignment()%4 != 0) {
break
}
v.reset(OpMIPSLoweredZero)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(*types.Type).Alignment()
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
v0.AddArg(ptr)
v.AddArg(v0)
v.AddArg(mem)
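
The Zero rules above are ordered from most to least specific: exact small sizes with suitable alignment unroll into MOVW/MOVH/MOVB stores, and only then does the generic rule (s > 16 || alignment%4 != 0) hand off to LoweredZero with the alignment in AuxInt. A compact model of that ordering, covering just a few of the cases shown:

package main

import "fmt"

// Only models the three cases exercised below; the real rule list in
// rewriteValueMIPS_OpZero_0/_10 has many more specific patterns.
func lowerZero(size, align int64) string {
	switch {
	case size == 8 && align%4 == 0:
		return "two MOVWstores"
	case size == 6 && align%2 == 0:
		return "three MOVHstores"
	case size > 16 || align%4 != 0:
		return fmt.Sprintf("LoweredZero [align=%d]", align)
	default:
		return "other unrolled form"
	}
}

func main() {
	fmt.Println(lowerZero(8, 4))  // two MOVWstores
	fmt.Println(lowerZero(6, 2))  // three MOVHstores
	fmt.Println(lowerZero(32, 4)) // LoweredZero [align=4]
}
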
@@ -9233,17 +9235,17 @@ func rewriteValueMIPS_OpZeroExt8to32_0(v *Value) bool {
func rewriteValueMIPS_OpZeromask_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zeromask x)
// cond:
// result: (NEG (SGTU x (MOVWconst [0])))
for {
x := v.Args[0]
v.reset(OpMIPSNEG)
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v0.AddArg(v1)
v.AddArg(v0)
@@ -9255,8 +9257,8 @@ func rewriteBlockMIPS(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockMIPSEQ:
// match: (EQ (FPFlagTrue cmp) yes no)
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
index 7958537a8c..59f4659164 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValueMIPS64(v *Value) bool {
switch v.Op {
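
rewriteMIPS64.go gets the same treatment, starting with the new import above. Because these files are generated with a fixed import set, each import is anchored by a blank-identifier use ("in case not otherwise used") so the file stays compilable even when no rule in it happens to touch that package. The pattern in miniature:

package main

import (
	"fmt"
	"math"
)

// The blank use keeps the import legal whether or not any of the
// (possibly regenerated) code below ends up referring to the package.
var _ = math.MinInt8 // in case not otherwise used

func main() {
	fmt.Println("unused-import errors avoided")
}
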
@@ -781,15 +783,15 @@ func rewriteValueMIPS64_OpClosureCall_0(v *Value) bool {
func rewriteValueMIPS64_OpCom16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Com16 x)
// cond:
// result: (NOR (MOVVconst [0]) x)
for {
x := v.Args[0]
v.reset(OpMIPS64NOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
@@ -799,15 +801,15 @@ func rewriteValueMIPS64_OpCom16_0(v *Value) bool {
func rewriteValueMIPS64_OpCom32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Com32 x)
// cond:
// result: (NOR (MOVVconst [0]) x)
for {
x := v.Args[0]
v.reset(OpMIPS64NOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
@@ -817,15 +819,15 @@ func rewriteValueMIPS64_OpCom32_0(v *Value) bool {
func rewriteValueMIPS64_OpCom64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Com64 x)
// cond:
// result: (NOR (MOVVconst [0]) x)
for {
x := v.Args[0]
v.reset(OpMIPS64NOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
@@ -835,15 +837,15 @@ func rewriteValueMIPS64_OpCom64_0(v *Value) bool {
func rewriteValueMIPS64_OpCom8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Com8 x)
// cond:
// result: (NOR (MOVVconst [0]) x)
for {
x := v.Args[0]
v.reset(OpMIPS64NOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
@@ -1063,8 +1065,8 @@ func rewriteValueMIPS64_OpCvt64to64F_0(v *Value) bool {
func rewriteValueMIPS64_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 x y)
// cond:
// result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
@@ -1072,11 +1074,11 @@ func rewriteValueMIPS64_OpDiv16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(types.Int64, types.Int64))
- v1 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
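
DIVV yields two results, so its value carries a tuple type, now built with types.NewTuple rather than the old package-local MakeTuple; Select0/Select1 then project out one element (the Div rules shown take Select1). An illustrative tuple under assumed field names, matching the t.FieldType(0) uses in the carry-op rules earlier:

package main

import "fmt"

type Type struct {
	name  string
	elems [2]*Type
}

func NewTuple(t0, t1 *Type) *Type {
	return &Type{name: "(" + t0.name + "," + t1.name + ")", elems: [2]*Type{t0, t1}}
}

func (t *Type) FieldType(i int) *Type { return t.elems[i] }

func main() {
	i64 := &Type{name: "int64"}
	divType := NewTuple(i64, i64) // the type carried by a DIVV value
	fmt.Println(divType.name, divType.FieldType(1).name)
}
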
@@ -1086,8 +1088,8 @@ func rewriteValueMIPS64_OpDiv16_0(v *Value) bool {
func rewriteValueMIPS64_OpDiv16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u x y)
// cond:
// result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
@@ -1095,11 +1097,11 @@ func rewriteValueMIPS64_OpDiv16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(types.UInt64, types.UInt64))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1109,8 +1111,8 @@ func rewriteValueMIPS64_OpDiv16u_0(v *Value) bool {
func rewriteValueMIPS64_OpDiv32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32 x y)
// cond:
// result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
@@ -1118,11 +1120,11 @@ func rewriteValueMIPS64_OpDiv32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(types.Int64, types.Int64))
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1145,8 +1147,8 @@ func rewriteValueMIPS64_OpDiv32F_0(v *Value) bool {
func rewriteValueMIPS64_OpDiv32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32u x y)
// cond:
// result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
@@ -1154,11 +1156,11 @@ func rewriteValueMIPS64_OpDiv32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(types.UInt64, types.UInt64))
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1168,8 +1170,8 @@ func rewriteValueMIPS64_OpDiv32u_0(v *Value) bool {
func rewriteValueMIPS64_OpDiv64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div64 x y)
// cond:
// result: (Select1 (DIVV x y))
@@ -1177,7 +1179,7 @@ func rewriteValueMIPS64_OpDiv64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(types.Int64, types.Int64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1200,8 +1202,8 @@ func rewriteValueMIPS64_OpDiv64F_0(v *Value) bool {
func rewriteValueMIPS64_OpDiv64u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div64u x y)
// cond:
// result: (Select1 (DIVVU x y))
@@ -1209,7 +1211,7 @@ func rewriteValueMIPS64_OpDiv64u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1219,8 +1221,8 @@ func rewriteValueMIPS64_OpDiv64u_0(v *Value) bool {
func rewriteValueMIPS64_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
@@ -1228,11 +1230,11 @@ func rewriteValueMIPS64_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(types.Int64, types.Int64))
- v1 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1242,8 +1244,8 @@ func rewriteValueMIPS64_OpDiv8_0(v *Value) bool {
func rewriteValueMIPS64_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
@@ -1251,11 +1253,11 @@ func rewriteValueMIPS64_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(types.UInt64, types.UInt64))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1265,8 +1267,8 @@ func rewriteValueMIPS64_OpDiv8u_0(v *Value) bool {
func rewriteValueMIPS64_OpEq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq16 x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
@@ -1274,14 +1276,14 @@ func rewriteValueMIPS64_OpEq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1291,8 +1293,8 @@ func rewriteValueMIPS64_OpEq16_0(v *Value) bool {
func rewriteValueMIPS64_OpEq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq32 x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
@@ -1300,14 +1302,14 @@ func rewriteValueMIPS64_OpEq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1324,7 +1326,7 @@ func rewriteValueMIPS64_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
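
Floating-point comparisons are split in two here: CMPEQF produces a flags-typed value, now the types.TypeFlags singleton, and FPFlagTrue materializes it as a boolean. A behavioral sketch of that pairing, with names and shapes illustrative only:

package main

import "fmt"

type flags struct{ set bool }

// cmpeqf stands in for the CMPEQF op: it produces only a flags result.
func cmpeqf(x, y float32) flags { return flags{set: x == y} }

// fpFlagTrue stands in for FPFlagTrue: it turns flags into 0 or 1.
func fpFlagTrue(f flags) int {
	if f.set {
		return 1
	}
	return 0
}

func main() {
	fmt.Println(fpFlagTrue(cmpeqf(1.5, 1.5))) // 1
	fmt.Println(fpFlagTrue(cmpeqf(1.5, 2.0))) // 0
}
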
@@ -1334,8 +1336,8 @@ func rewriteValueMIPS64_OpEq32F_0(v *Value) bool {
func rewriteValueMIPS64_OpEq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq64 x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR x y))
@@ -1343,10 +1345,10 @@ func rewriteValueMIPS64_OpEq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
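
The integer Eq lowering above is worth a second look: (SGTU (MOVVconst [1]) (XOR x y)) computes x == y as 1 >u (x ^ y), since the xor is zero exactly when the operands are equal, and only zero is unsigned-less-than one. The identity, checked directly:

package main

import "fmt"

// x == y lowered as 1 >u (x ^ y), mirroring the Eq64 rule above.
func eqViaSGTU(x, y uint64) bool { return 1 > (x ^ y) }

func main() {
	fmt.Println(eqViaSGTU(7, 7), eqViaSGTU(7, 8), eqViaSGTU(0, 0)) // true false true
}
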
@@ -1363,7 +1365,7 @@ func rewriteValueMIPS64_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1373,8 +1375,8 @@ func rewriteValueMIPS64_OpEq64F_0(v *Value) bool {
func rewriteValueMIPS64_OpEq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq8 x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
@@ -1382,14 +1384,14 @@ func rewriteValueMIPS64_OpEq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1399,19 +1401,19 @@ func rewriteValueMIPS64_OpEq8_0(v *Value) bool {
func rewriteValueMIPS64_OpEqB_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqB x y)
// cond:
- // result: (XOR (MOVVconst [1]) (XOR <types.Bool> x y))
+ // result: (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, types.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool)
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
@@ -1421,8 +1423,8 @@ func rewriteValueMIPS64_OpEqB_0(v *Value) bool {
func rewriteValueMIPS64_OpEqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqPtr x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR x y))
@@ -1430,10 +1432,10 @@ func rewriteValueMIPS64_OpEqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
@@ -1443,8 +1445,8 @@ func rewriteValueMIPS64_OpEqPtr_0(v *Value) bool {
func rewriteValueMIPS64_OpGeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 y) (SignExt16to64 x)))
@@ -1452,14 +1454,14 @@ func rewriteValueMIPS64_OpGeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, types.Bool)
- v2 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1469,8 +1471,8 @@ func rewriteValueMIPS64_OpGeq16_0(v *Value) bool {
func rewriteValueMIPS64_OpGeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)))
@@ -1478,14 +1480,14 @@ func rewriteValueMIPS64_OpGeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1495,8 +1497,8 @@ func rewriteValueMIPS64_OpGeq16U_0(v *Value) bool {
func rewriteValueMIPS64_OpGeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 y) (SignExt32to64 x)))
@@ -1504,14 +1506,14 @@ func rewriteValueMIPS64_OpGeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, types.Bool)
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1528,7 +1530,7 @@ func rewriteValueMIPS64_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1538,8 +1540,8 @@ func rewriteValueMIPS64_OpGeq32F_0(v *Value) bool {
func rewriteValueMIPS64_OpGeq32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)))
@@ -1547,14 +1549,14 @@ func rewriteValueMIPS64_OpGeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1564,8 +1566,8 @@ func rewriteValueMIPS64_OpGeq32U_0(v *Value) bool {
func rewriteValueMIPS64_OpGeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq64 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT y x))
@@ -1573,10 +1575,10 @@ func rewriteValueMIPS64_OpGeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, types.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
v1.AddArg(y)
v1.AddArg(x)
v.AddArg(v1)
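
Geq64 follows the same style: with no set-if-greater-or-equal instruction, x >= y becomes (XOR (MOVVconst [1]) (SGT y x)). SGT yields 0 or 1, so xoring with 1 is boolean negation of y > x. Checked in plain Go:

package main

import "fmt"

// x >= y as (1 XOR (y > x)), mirroring the Geq64 rule above.
func geqViaSGT(x, y int64) bool {
	sgt := uint64(0)
	if y > x {
		sgt = 1
	}
	return sgt^1 == 1
}

func main() {
	fmt.Println(geqViaSGT(5, 3), geqViaSGT(3, 5), geqViaSGT(4, 4)) // true false true
}
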
@@ -1593,7 +1595,7 @@ func rewriteValueMIPS64_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1603,8 +1605,8 @@ func rewriteValueMIPS64_OpGeq64F_0(v *Value) bool {
func rewriteValueMIPS64_OpGeq64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq64U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU y x))
@@ -1612,10 +1614,10 @@ func rewriteValueMIPS64_OpGeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v1.AddArg(y)
v1.AddArg(x)
v.AddArg(v1)
@@ -1625,8 +1627,8 @@ func rewriteValueMIPS64_OpGeq64U_0(v *Value) bool {
func rewriteValueMIPS64_OpGeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 y) (SignExt8to64 x)))
@@ -1634,14 +1636,14 @@ func rewriteValueMIPS64_OpGeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, types.Bool)
- v2 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1651,8 +1653,8 @@ func rewriteValueMIPS64_OpGeq8_0(v *Value) bool {
func rewriteValueMIPS64_OpGeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)))
@@ -1660,14 +1662,14 @@ func rewriteValueMIPS64_OpGeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
@@ -1686,8 +1688,8 @@ func rewriteValueMIPS64_OpGetClosurePtr_0(v *Value) bool {
func rewriteValueMIPS64_OpGreater16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16 x y)
// cond:
// result: (SGT (SignExt16to64 x) (SignExt16to64 y))
@@ -1695,10 +1697,10 @@ func rewriteValueMIPS64_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1707,8 +1709,8 @@ func rewriteValueMIPS64_OpGreater16_0(v *Value) bool {
func rewriteValueMIPS64_OpGreater16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16U x y)
// cond:
// result: (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))
@@ -1716,10 +1718,10 @@ func rewriteValueMIPS64_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1728,8 +1730,8 @@ func rewriteValueMIPS64_OpGreater16U_0(v *Value) bool {
func rewriteValueMIPS64_OpGreater32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater32 x y)
// cond:
// result: (SGT (SignExt32to64 x) (SignExt32to64 y))
@@ -1737,10 +1739,10 @@ func rewriteValueMIPS64_OpGreater32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1756,7 +1758,7 @@ func rewriteValueMIPS64_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1766,8 +1768,8 @@ func rewriteValueMIPS64_OpGreater32F_0(v *Value) bool {
func rewriteValueMIPS64_OpGreater32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater32U x y)
// cond:
// result: (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))
@@ -1775,10 +1777,10 @@ func rewriteValueMIPS64_OpGreater32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1807,7 +1809,7 @@ func rewriteValueMIPS64_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1830,8 +1832,8 @@ func rewriteValueMIPS64_OpGreater64U_0(v *Value) bool {
func rewriteValueMIPS64_OpGreater8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8 x y)
// cond:
// result: (SGT (SignExt8to64 x) (SignExt8to64 y))
@@ -1839,10 +1841,10 @@ func rewriteValueMIPS64_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1851,8 +1853,8 @@ func rewriteValueMIPS64_OpGreater8_0(v *Value) bool {
func rewriteValueMIPS64_OpGreater8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8U x y)
// cond:
// result: (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))
@@ -1860,10 +1862,10 @@ func rewriteValueMIPS64_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1872,22 +1874,22 @@ func rewriteValueMIPS64_OpGreater8U_0(v *Value) bool {
func rewriteValueMIPS64_OpHmul32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32 x y)
// cond:
- // result: (SRAVconst (Select1 <types.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+ // result: (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAVconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpSelect1, types.Int64)
- v1 := b.NewValue0(v.Pos, OpMIPS64MULV, MakeTuple(types.Int64, types.Int64))
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
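
Hmul32 has no dedicated instruction either: the rule above sign-extends both operands, takes the full 64-bit product (the tuple's Select1 element), and arithmetic-shifts it right by 32, leaving the high half; Hmul64, by contrast, reads the high word directly via Select0 below. The arithmetic in plain Go:

package main

import "fmt"

// High 32 bits of the 32x32 product, as in the Hmul32 rule above.
func hmul32(x, y int32) int32 {
	return int32((int64(x) * int64(y)) >> 32)
}

func main() {
	fmt.Println(hmul32(1<<30, 4))  // 1: (2^30 * 4) >> 32
	fmt.Println(hmul32(-1, 1<<30)) // -1: arithmetic shift keeps the sign
}
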
@@ -1898,22 +1900,22 @@ func rewriteValueMIPS64_OpHmul32_0(v *Value) bool {
func rewriteValueMIPS64_OpHmul32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32u x y)
// cond:
- // result: (SRLVconst (Select1 <types.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+ // result: (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRLVconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpSelect1, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(types.UInt64, types.UInt64))
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
@@ -1924,8 +1926,8 @@ func rewriteValueMIPS64_OpHmul32u_0(v *Value) bool {
func rewriteValueMIPS64_OpHmul64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul64 x y)
// cond:
// result: (Select0 (MULV x y))
@@ -1933,7 +1935,7 @@ func rewriteValueMIPS64_OpHmul64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULV, MakeTuple(types.Int64, types.Int64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1943,8 +1945,8 @@ func rewriteValueMIPS64_OpHmul64_0(v *Value) bool {
func rewriteValueMIPS64_OpHmul64u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul64u x y)
// cond:
// result: (Select0 (MULVU x y))
@@ -1952,7 +1954,7 @@ func rewriteValueMIPS64_OpHmul64u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1990,8 +1992,8 @@ func rewriteValueMIPS64_OpIsInBounds_0(v *Value) bool {
func rewriteValueMIPS64_OpIsNonNil_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsNonNil ptr)
// cond:
// result: (SGTU ptr (MOVVconst [0]))
@@ -1999,7 +2001,7 @@ func rewriteValueMIPS64_OpIsNonNil_0(v *Value) bool {
ptr := v.Args[0]
v.reset(OpMIPS64SGTU)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
return true
@@ -2008,8 +2010,8 @@ func rewriteValueMIPS64_OpIsNonNil_0(v *Value) bool {
func rewriteValueMIPS64_OpIsSliceInBounds_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsSliceInBounds idx len)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU idx len))
@@ -2017,10 +2019,10 @@ func rewriteValueMIPS64_OpIsSliceInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v1.AddArg(idx)
v1.AddArg(len)
v.AddArg(v1)
@@ -2030,8 +2032,8 @@ func rewriteValueMIPS64_OpIsSliceInBounds_0(v *Value) bool {
func rewriteValueMIPS64_OpLeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
@@ -2039,14 +2041,14 @@ func rewriteValueMIPS64_OpLeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, types.Bool)
- v2 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -2056,8 +2058,8 @@ func rewriteValueMIPS64_OpLeq16_0(v *Value) bool {
func rewriteValueMIPS64_OpLeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
@@ -2065,14 +2067,14 @@ func rewriteValueMIPS64_OpLeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -2082,8 +2084,8 @@ func rewriteValueMIPS64_OpLeq16U_0(v *Value) bool {
func rewriteValueMIPS64_OpLeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
@@ -2091,14 +2093,14 @@ func rewriteValueMIPS64_OpLeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, types.Bool)
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -2115,7 +2117,7 @@ func rewriteValueMIPS64_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2125,8 +2127,8 @@ func rewriteValueMIPS64_OpLeq32F_0(v *Value) bool {
func rewriteValueMIPS64_OpLeq32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
@@ -2134,14 +2136,14 @@ func rewriteValueMIPS64_OpLeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -2151,8 +2153,8 @@ func rewriteValueMIPS64_OpLeq32U_0(v *Value) bool {
func rewriteValueMIPS64_OpLeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq64 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT x y))
@@ -2160,10 +2162,10 @@ func rewriteValueMIPS64_OpLeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, types.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
@@ -2180,7 +2182,7 @@ func rewriteValueMIPS64_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2190,8 +2192,8 @@ func rewriteValueMIPS64_OpLeq64F_0(v *Value) bool {
func rewriteValueMIPS64_OpLeq64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq64U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU x y))
@@ -2199,10 +2201,10 @@ func rewriteValueMIPS64_OpLeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
@@ -2212,8 +2214,8 @@ func rewriteValueMIPS64_OpLeq64U_0(v *Value) bool {
func rewriteValueMIPS64_OpLeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
@@ -2221,14 +2223,14 @@ func rewriteValueMIPS64_OpLeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, types.Bool)
- v2 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -2238,8 +2240,8 @@ func rewriteValueMIPS64_OpLeq8_0(v *Value) bool {
func rewriteValueMIPS64_OpLeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
@@ -2247,14 +2249,14 @@ func rewriteValueMIPS64_OpLeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
@@ -2264,8 +2266,8 @@ func rewriteValueMIPS64_OpLeq8U_0(v *Value) bool {
func rewriteValueMIPS64_OpLess16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16 x y)
// cond:
// result: (SGT (SignExt16to64 y) (SignExt16to64 x))
@@ -2273,10 +2275,10 @@ func rewriteValueMIPS64_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2285,8 +2287,8 @@ func rewriteValueMIPS64_OpLess16_0(v *Value) bool {
func rewriteValueMIPS64_OpLess16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16U x y)
// cond:
// result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
@@ -2294,10 +2296,10 @@ func rewriteValueMIPS64_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2306,8 +2308,8 @@ func rewriteValueMIPS64_OpLess16U_0(v *Value) bool {
func rewriteValueMIPS64_OpLess32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less32 x y)
// cond:
// result: (SGT (SignExt32to64 y) (SignExt32to64 x))
@@ -2315,10 +2317,10 @@ func rewriteValueMIPS64_OpLess32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v1.AddArg(x)
v.AddArg(v1)
return true
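
The backend's integer comparison ops are SGT and SGTU ("set on greater than"), so each Less rule widens both operands to the full register and swaps them. The identity being encoded, as plain Go:

func less32(x, y int32) bool {
	return int64(y) > int64(x) // SGT (SignExt32to64 y) (SignExt32to64 x)
}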
@@ -2334,7 +2336,7 @@ func rewriteValueMIPS64_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2344,8 +2346,8 @@ func rewriteValueMIPS64_OpLess32F_0(v *Value) bool {
func rewriteValueMIPS64_OpLess32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less32U x y)
// cond:
// result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
@@ -2353,10 +2355,10 @@ func rewriteValueMIPS64_OpLess32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2385,7 +2387,7 @@ func rewriteValueMIPS64_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
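
Floating-point comparisons flow through the FP condition flag, which is why these hunks pick up the new types.TypeFlags singleton for the CMPGTF/CMPGTD values. Semantically the lowering is again just an operand swap; a hedged scalar sketch:

func less64F(x, y float64) bool {
	return y > x // FPFlagTrue (CMPGTD y x)
}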
@@ -2408,8 +2410,8 @@ func rewriteValueMIPS64_OpLess64U_0(v *Value) bool {
func rewriteValueMIPS64_OpLess8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8 x y)
// cond:
// result: (SGT (SignExt8to64 y) (SignExt8to64 x))
@@ -2417,10 +2419,10 @@ func rewriteValueMIPS64_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2429,8 +2431,8 @@ func rewriteValueMIPS64_OpLess8_0(v *Value) bool {
func rewriteValueMIPS64_OpLess8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8U x y)
// cond:
// result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
@@ -2438,10 +2440,10 @@ func rewriteValueMIPS64_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2603,29 +2605,29 @@ func rewriteValueMIPS64_OpLoad_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2635,29 +2637,29 @@ func rewriteValueMIPS64_OpLsh16x16_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2667,19 +2669,19 @@ func rewriteValueMIPS64_OpLsh16x32_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SLLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
@@ -2695,29 +2697,29 @@ func rewriteValueMIPS64_OpLsh16x64_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2727,29 +2729,29 @@ func rewriteValueMIPS64_OpLsh16x8_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2759,29 +2761,29 @@ func rewriteValueMIPS64_OpLsh32x16_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2791,19 +2793,19 @@ func rewriteValueMIPS64_OpLsh32x32_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SLLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
@@ -2819,29 +2821,29 @@ func rewriteValueMIPS64_OpLsh32x64_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2851,29 +2853,29 @@ func rewriteValueMIPS64_OpLsh32x8_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2883,29 +2885,29 @@ func rewriteValueMIPS64_OpLsh64x16_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh64x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2915,19 +2917,19 @@ func rewriteValueMIPS64_OpLsh64x32_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh64x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SLLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
@@ -2943,29 +2945,29 @@ func rewriteValueMIPS64_OpLsh64x64_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -2975,29 +2977,29 @@ func rewriteValueMIPS64_OpLsh64x8_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -3007,29 +3009,29 @@ func rewriteValueMIPS64_OpLsh8x16_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -3039,19 +3041,19 @@ func rewriteValueMIPS64_OpLsh8x32_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SLLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
@@ -3067,29 +3069,29 @@ func rewriteValueMIPS64_OpLsh8x64_0(v *Value) bool {
func rewriteValueMIPS64_OpLsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
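
Every Lsh rule above has the same shape because Go defines a left shift by 64 or more as 0, while the hardware SLLV reads only the low six bits of the count. The SGTU/NEGV pair builds an all-ones-or-zero mask that zeroes oversized shifts. A scalar sketch of Lsh64x64:

func lsh64x64(x, s uint64) uint64 {
	var mask uint64
	if 64 > s { // SGTU (Const64 [64]) s
		mask = ^uint64(0) // NEGV turns the 0/1 comparison result into 0 or all ones
	}
	return mask & (x << (s & 63)) // AND mask (SLLV x s); SLLV itself masks the count
}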
@@ -6096,8 +6098,8 @@ func rewriteValueMIPS64_OpMIPS64XORconst_0(v *Value) bool {
func rewriteValueMIPS64_OpMod16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16 x y)
// cond:
// result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
@@ -6105,11 +6107,11 @@ func rewriteValueMIPS64_OpMod16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(types.Int64, types.Int64))
- v1 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6119,8 +6121,8 @@ func rewriteValueMIPS64_OpMod16_0(v *Value) bool {
func rewriteValueMIPS64_OpMod16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16u x y)
// cond:
// result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
@@ -6128,11 +6130,11 @@ func rewriteValueMIPS64_OpMod16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(types.UInt64, types.UInt64))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6142,8 +6144,8 @@ func rewriteValueMIPS64_OpMod16u_0(v *Value) bool {
func rewriteValueMIPS64_OpMod32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32 x y)
// cond:
// result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
@@ -6151,11 +6153,11 @@ func rewriteValueMIPS64_OpMod32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(types.Int64, types.Int64))
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6165,8 +6167,8 @@ func rewriteValueMIPS64_OpMod32_0(v *Value) bool {
func rewriteValueMIPS64_OpMod32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32u x y)
// cond:
// result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
@@ -6174,11 +6176,11 @@ func rewriteValueMIPS64_OpMod32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(types.UInt64, types.UInt64))
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6188,8 +6190,8 @@ func rewriteValueMIPS64_OpMod32u_0(v *Value) bool {
func rewriteValueMIPS64_OpMod64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod64 x y)
// cond:
// result: (Select0 (DIVV x y))
@@ -6197,7 +6199,7 @@ func rewriteValueMIPS64_OpMod64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(types.Int64, types.Int64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6207,8 +6209,8 @@ func rewriteValueMIPS64_OpMod64_0(v *Value) bool {
func rewriteValueMIPS64_OpMod64u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod64u x y)
// cond:
// result: (Select0 (DIVVU x y))
@@ -6216,7 +6218,7 @@ func rewriteValueMIPS64_OpMod64u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6226,8 +6228,8 @@ func rewriteValueMIPS64_OpMod64u_0(v *Value) bool {
func rewriteValueMIPS64_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
@@ -6235,11 +6237,11 @@ func rewriteValueMIPS64_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(types.Int64, types.Int64))
- v1 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6249,8 +6251,8 @@ func rewriteValueMIPS64_OpMod8_0(v *Value) bool {
func rewriteValueMIPS64_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
@@ -6258,11 +6260,11 @@ func rewriteValueMIPS64_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(types.UInt64, types.UInt64))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
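
The Mod rules carry the second substantive change in this file: DIVV and DIVVU produce a two-result HI/LO tuple, and the old MakeTuple call becomes the types.NewTuple constructor. Select0 picks the remainder half (Div would take Select1). What Mod16 computes, as plain Go:

func mod16(x, y int16) int16 {
	return int16(int64(x) % int64(y)) // Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))
}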
@@ -6272,8 +6274,8 @@ func rewriteValueMIPS64_OpMod8u_0(v *Value) bool {
func rewriteValueMIPS64_OpMove_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -6299,7 +6301,7 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpMIPS64MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6307,7 +6309,7 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
return true
}
// match: (Move [2] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore dst (MOVHload src mem) mem)
for {
if v.AuxInt != 2 {
@@ -6317,12 +6319,12 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6342,14 +6344,14 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 1
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6358,7 +6360,7 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
if v.AuxInt != 4 {
@@ -6368,12 +6370,12 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6381,7 +6383,7 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
for {
if v.AuxInt != 4 {
@@ -6391,20 +6393,20 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6425,30 +6427,30 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -6459,7 +6461,7 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(*types.Type).Alignment()%8 == 0
// result: (MOVVstore dst (MOVVload src mem) mem)
for {
if v.AuxInt != 8 {
@@ -6469,12 +6471,12 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(*types.Type).Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6482,7 +6484,7 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
if v.AuxInt != 8 {
@@ -6492,20 +6494,20 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6514,7 +6516,7 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
for {
if v.AuxInt != 8 {
@@ -6524,36 +6526,36 @@ func rewriteValueMIPS64_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v3.AuxInt = 2
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v4.AuxInt = 2
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -6570,8 +6572,8 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [3] dst src mem)
// cond:
// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
@@ -6585,22 +6587,22 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, types.Int8)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6610,7 +6612,7 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
return true
}
// match: (Move [6] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
for {
if v.AuxInt != 6 {
@@ -6620,28 +6622,28 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, types.Int16)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6651,7 +6653,7 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
return true
}
// match: (Move [12] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
for {
if v.AuxInt != 12 {
@@ -6661,28 +6663,28 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, types.Int32)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6692,7 +6694,7 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
return true
}
// match: (Move [16] {t} dst src mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(*types.Type).Alignment()%8 == 0
// result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))
for {
if v.AuxInt != 16 {
@@ -6702,20 +6704,20 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(*types.Type).Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6724,7 +6726,7 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
return true
}
// match: (Move [24] {t} dst src mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(*types.Type).Alignment()%8 == 0
// result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)))
for {
if v.AuxInt != 24 {
@@ -6734,28 +6736,28 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(*types.Type).Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
v.AuxInt = 16
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
v0.AuxInt = 16
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6765,23 +6767,23 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: s > 24 || t.(Type).Alignment()%8 != 0
- // result: (LoweredMove [t.(Type).Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: s > 24 || t.(*types.Type).Alignment()%8 != 0
+ // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(s > 24 || t.(Type).Alignment()%8 != 0) {
+ if !(s > 24 || t.(*types.Type).Alignment()%8 != 0) {
break
}
v.reset(OpMIPS64LoweredMove)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(*types.Type).Alignment()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
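
Move stores the moved type in v.Aux as an interface{}, so each alignment guard now asserts the concrete *types.Type rather than the old ssa.Type interface. The final rule's guard in isolation, as a sketch assuming package ssa context (the ok form is illustrative; the generated code asserts directly):

s := v.AuxInt
if t, ok := v.Aux.(*types.Type); ok && (s > 24 || t.Alignment()%8 != 0) {
	// too large or misaligned for unrolled stores: emit LoweredMove instead
}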
@@ -6792,8 +6794,8 @@ func rewriteValueMIPS64_OpMove_10(v *Value) bool {
func rewriteValueMIPS64_OpMul16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul16 x y)
// cond:
// result: (Select1 (MULVU x y))
@@ -6801,7 +6803,7 @@ func rewriteValueMIPS64_OpMul16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6811,8 +6813,8 @@ func rewriteValueMIPS64_OpMul16_0(v *Value) bool {
func rewriteValueMIPS64_OpMul32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul32 x y)
// cond:
// result: (Select1 (MULVU x y))
@@ -6820,7 +6822,7 @@ func rewriteValueMIPS64_OpMul32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6843,8 +6845,8 @@ func rewriteValueMIPS64_OpMul32F_0(v *Value) bool {
func rewriteValueMIPS64_OpMul64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul64 x y)
// cond:
// result: (Select1 (MULVU x y))
@@ -6852,7 +6854,7 @@ func rewriteValueMIPS64_OpMul64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6875,8 +6877,8 @@ func rewriteValueMIPS64_OpMul64F_0(v *Value) bool {
func rewriteValueMIPS64_OpMul8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul8 x y)
// cond:
// result: (Select1 (MULVU x y))
@@ -6884,7 +6886,7 @@ func rewriteValueMIPS64_OpMul8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(types.UInt64, types.UInt64))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
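
One MULVU rule serves Mul8 through Mul64: Select1 is the low 64 bits of the product, and truncating a full-width product yields the correct result at every narrower width. The identity:

func mul16(x, y int16) int16 {
	return int16(int64(x) * int64(y)) // Select1 (MULVU x y), then implicit truncation
}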
@@ -6960,8 +6962,8 @@ func rewriteValueMIPS64_OpNeg8_0(v *Value) bool {
func rewriteValueMIPS64_OpNeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq16 x y)
// cond:
// result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
@@ -6969,15 +6971,15 @@ func rewriteValueMIPS64_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v3.AuxInt = 0
v.AddArg(v3)
return true
@@ -6986,8 +6988,8 @@ func rewriteValueMIPS64_OpNeq16_0(v *Value) bool {
func rewriteValueMIPS64_OpNeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq32 x y)
// cond:
// result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
@@ -6995,15 +6997,15 @@ func rewriteValueMIPS64_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v3.AuxInt = 0
v.AddArg(v3)
return true
@@ -7019,7 +7021,7 @@ func rewriteValueMIPS64_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagFalse)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -7029,8 +7031,8 @@ func rewriteValueMIPS64_OpNeq32F_0(v *Value) bool {
func rewriteValueMIPS64_OpNeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq64 x y)
// cond:
// result: (SGTU (XOR x y) (MOVVconst [0]))
@@ -7038,11 +7040,11 @@ func rewriteValueMIPS64_OpNeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v1.AuxInt = 0
v.AddArg(v1)
return true
@@ -7058,7 +7060,7 @@ func rewriteValueMIPS64_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64FPFlagFalse)
- v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -7068,8 +7070,8 @@ func rewriteValueMIPS64_OpNeq64F_0(v *Value) bool {
func rewriteValueMIPS64_OpNeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq8 x y)
// cond:
// result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
@@ -7077,15 +7079,15 @@ func rewriteValueMIPS64_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v3.AuxInt = 0
v.AddArg(v3)
return true
@@ -7107,8 +7109,8 @@ func rewriteValueMIPS64_OpNeqB_0(v *Value) bool {
func rewriteValueMIPS64_OpNeqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (NeqPtr x y)
// cond:
// result: (SGTU (XOR x y) (MOVVconst [0]))
@@ -7116,11 +7118,11 @@ func rewriteValueMIPS64_OpNeqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v1.AuxInt = 0
v.AddArg(v1)
return true
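
Each Neq variant reduces to a single unsigned comparison: two values differ exactly when their XOR is non-zero, and SGTU against MOVVconst [0] tests that. (Neq16 widens x with ZeroExt16to32 but y with ZeroExt16to64; both clear the upper register bits, so the test still holds.) In scalar form:

func neq64(x, y uint64) bool {
	return x^y > 0 // SGTU (XOR x y) (MOVVconst [0])
}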
@@ -7270,31 +7272,31 @@ func rewriteValueMIPS64_OpRound64F_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh16Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -7304,31 +7306,31 @@ func rewriteValueMIPS64_OpRsh16Ux16_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh16Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -7338,26 +7340,26 @@ func rewriteValueMIPS64_OpRsh16Ux32_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(x)
v3.AddArg(v4)
v3.AddArg(y)
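
Rsh16Ux* combines the two earlier ingredients: the operand is zero-extended before SRLV so zeros shift in from the top, and the same SGTU/NEGV mask zeroes the result for counts of 64 or more. A sketch:

func rsh16Ux64(x uint16, s uint64) uint16 {
	var mask uint64
	if 64 > s { // SGTU (Const64 [64]) s
		mask = ^uint64(0) // NEGV
	}
	return uint16(mask & (uint64(x) >> (s & 63))) // AND mask (SRLV (ZeroExt16to64 x) s)
}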
@@ -7368,31 +7370,31 @@ func rewriteValueMIPS64_OpRsh16Ux64_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh16Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -7402,31 +7404,31 @@ func rewriteValueMIPS64_OpRsh16Ux8_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x16 <t> x y)
// cond:
- // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -7436,31 +7438,31 @@ func rewriteValueMIPS64_OpRsh16x16_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x32 <t> x y)
// cond:
- // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -7470,24 +7472,24 @@ func rewriteValueMIPS64_OpRsh16x32_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 <t> x y)
// cond:
- // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <typ.UInt64> [63]))) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v3.AddArg(y)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 63
v3.AddArg(v4)
v2.AddArg(v3)
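The signed variants clamp rather than mask: an arithmetic right shift by 64 or more must keep filling with the sign bit, that is, behave like a shift by 63. ORing the amount with the NEGV mask forces its low six bits, the only bits SRAV reads, to 63 whenever y > 63. A plain-Go sketch of the equivalent computation (assumed semantics, not compiler code):

package shiftsem

func rsh16x64(x int16, y uint64) int64 {
	wide := int64(x) // SignExt16to64 x
	var tooBig uint64
	if y > 63 { // SGTU y (Const64 [63])
		tooBig = 1
	}
	amount := (-tooBig | y) & 63 // OR (NEGV ...) y; SRAV masks to 6 bits
	return wide >> amount        // SRAV
}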
@@ -7500,31 +7502,31 @@ func rewriteValueMIPS64_OpRsh16x64_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x8 <t> x y)
// cond:
- // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -7534,31 +7536,31 @@ func rewriteValueMIPS64_OpRsh16x8_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh32Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -7568,31 +7570,31 @@ func rewriteValueMIPS64_OpRsh32Ux16_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh32Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -7602,26 +7604,26 @@ func rewriteValueMIPS64_OpRsh32Ux32_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh32Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(x)
v3.AddArg(v4)
v3.AddArg(y)
@@ -7632,31 +7634,31 @@ func rewriteValueMIPS64_OpRsh32Ux64_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh32Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -7666,31 +7668,31 @@ func rewriteValueMIPS64_OpRsh32Ux8_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x16 <t> x y)
// cond:
- // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -7700,31 +7702,31 @@ func rewriteValueMIPS64_OpRsh32x16_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x32 <t> x y)
// cond:
- // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -7734,24 +7736,24 @@ func rewriteValueMIPS64_OpRsh32x32_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x64 <t> x y)
// cond:
- // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <typ.UInt64> [63]))) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v3.AddArg(y)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 63
v3.AddArg(v4)
v2.AddArg(v3)
@@ -7764,31 +7766,31 @@ func rewriteValueMIPS64_OpRsh32x64_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x8 <t> x y)
// cond:
- // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -7798,29 +7800,29 @@ func rewriteValueMIPS64_OpRsh32x8_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh64Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7830,29 +7832,29 @@ func rewriteValueMIPS64_OpRsh64Ux16_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh64Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7862,19 +7864,19 @@ func rewriteValueMIPS64_OpRsh64Ux32_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh64Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SRLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
@@ -7890,29 +7892,29 @@ func rewriteValueMIPS64_OpRsh64Ux64_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh64Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7922,11 +7924,11 @@ func rewriteValueMIPS64_OpRsh64Ux8_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x16 <t> x y)
// cond:
- // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
@@ -7935,16 +7937,16 @@ func rewriteValueMIPS64_OpRsh64x16_0(v *Value) bool {
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 63
v2.AddArg(v4)
v1.AddArg(v2)
v0.AddArg(v1)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v5.AddArg(y)
v0.AddArg(v5)
v.AddArg(v0)
@@ -7954,11 +7956,11 @@ func rewriteValueMIPS64_OpRsh64x16_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh64x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x32 <t> x y)
// cond:
- // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
@@ -7967,16 +7969,16 @@ func rewriteValueMIPS64_OpRsh64x32_0(v *Value) bool {
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 63
v2.AddArg(v4)
v1.AddArg(v2)
v0.AddArg(v1)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v5.AddArg(y)
v0.AddArg(v5)
v.AddArg(v0)
@@ -7986,11 +7988,11 @@ func rewriteValueMIPS64_OpRsh64x32_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh64x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x64 <t> x y)
// cond:
- // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <typ.UInt64> [63]))) y))
for {
t := v.Type
x := v.Args[0]
@@ -7999,9 +8001,9 @@ func rewriteValueMIPS64_OpRsh64x64_0(v *Value) bool {
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v2.AddArg(y)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = 63
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8014,11 +8016,11 @@ func rewriteValueMIPS64_OpRsh64x64_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x8 <t> x y)
// cond:
- // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
@@ -8027,16 +8029,16 @@ func rewriteValueMIPS64_OpRsh64x8_0(v *Value) bool {
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 63
v2.AddArg(v4)
v1.AddArg(v2)
v0.AddArg(v1)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(y)
v0.AddArg(v5)
v.AddArg(v0)
@@ -8046,31 +8048,31 @@ func rewriteValueMIPS64_OpRsh64x8_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh8Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -8080,31 +8082,31 @@ func rewriteValueMIPS64_OpRsh8Ux16_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh8Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -8114,26 +8116,26 @@ func rewriteValueMIPS64_OpRsh8Ux32_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(x)
v3.AddArg(v4)
v3.AddArg(y)
@@ -8144,31 +8146,31 @@ func rewriteValueMIPS64_OpRsh8Ux64_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh8Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
@@ -8178,31 +8180,31 @@ func rewriteValueMIPS64_OpRsh8Ux8_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x16 <t> x y)
// cond:
- // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -8212,31 +8214,31 @@ func rewriteValueMIPS64_OpRsh8x16_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x32 <t> x y)
// cond:
- // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -8246,24 +8248,24 @@ func rewriteValueMIPS64_OpRsh8x32_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x64 <t> x y)
// cond:
- // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <typ.UInt64> [63]))) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
v3.AddArg(y)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 63
v3.AddArg(v4)
v2.AddArg(v3)
@@ -8276,31 +8278,31 @@ func rewriteValueMIPS64_OpRsh8x64_0(v *Value) bool {
func rewriteValueMIPS64_OpRsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x8 <t> x y)
// cond:
- // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <typ.UInt64> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, types.Bool)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
@@ -8963,14 +8965,14 @@ func rewriteValueMIPS64_OpStaticCall_0(v *Value) bool {
}
func rewriteValueMIPS64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(OpMIPS64MOVBstore)
@@ -8980,14 +8982,14 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(OpMIPS64MOVHstore)
@@ -8997,14 +8999,14 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVWstore)
@@ -9014,14 +9016,14 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && !is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)
// result: (MOVVstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && !is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVVstore)
@@ -9031,14 +9033,14 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVFstore)
@@ -9048,14 +9050,14 @@ func rewriteValueMIPS64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVDstore)
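The Store rules above form a simple dispatch on the store's aux type, now asserted to *types.Type: widths 1 and 2 map directly to byte and halfword stores, while widths 4 and 8 also split on whether the stored value is floating point. A hedged restatement of the dispatch as an ordinary Go helper (names are illustrative, not the compiler's):

package storesem

// mipsStoreFor summarizes the six Store rules above.
func mipsStoreFor(size int64, isFloat bool) string {
	switch {
	case size == 1:
		return "MOVBstore"
	case size == 2:
		return "MOVHstore"
	case size == 4 && !isFloat:
		return "MOVWstore"
	case size == 8 && !isFloat:
		return "MOVVstore"
	case size == 4 && isFloat:
		return "MOVFstore"
	case size == 8 && isFloat:
		return "MOVDstore"
	}
	return "" // no rule matched; left to other lowerings
}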
@@ -9284,8 +9286,8 @@ func rewriteValueMIPS64_OpXor8_0(v *Value) bool {
func rewriteValueMIPS64_OpZero_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [0] _ mem)
// cond:
// result: mem
@@ -9310,14 +9312,14 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
mem := v.Args[1]
v.reset(OpMIPS64MOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore ptr (MOVVconst [0]) mem)
for {
if v.AuxInt != 2 {
@@ -9326,12 +9328,12 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -9349,13 +9351,13 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 1
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -9363,7 +9365,7 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore ptr (MOVVconst [0]) mem)
for {
if v.AuxInt != 4 {
@@ -9372,19 +9374,19 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))
for {
if v.AuxInt != 4 {
@@ -9393,19 +9395,19 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -9424,25 +9426,25 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 3
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v3.AuxInt = 1
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v4.AuxInt = 0
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
@@ -9452,7 +9454,7 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(*types.Type).Alignment()%8 == 0
// result: (MOVVstore ptr (MOVVconst [0]) mem)
for {
if v.AuxInt != 8 {
@@ -9461,19 +9463,19 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(*types.Type).Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))
for {
if v.AuxInt != 8 {
@@ -9482,19 +9484,19 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -9502,7 +9504,7 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))))
for {
if v.AuxInt != 8 {
@@ -9511,31 +9513,31 @@ func rewriteValueMIPS64_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 6
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v3.AuxInt = 2
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v4.AuxInt = 0
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
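For a fixed size such as Zero [8], the rules above are tried in order, so alignment picks the widest legal store: one MOVVstore at 8-byte alignment, two MOVWstores at 4, four MOVHstores at 2. A small illustrative helper (not compiler code; the default case is an assumption about rules outside this hunk):

package zerosem

// storesForZero8 summarizes the three Zero [8] alignment rules above.
func storesForZero8(align int64) (op string, count int) {
	switch {
	case align%8 == 0:
		return "MOVVstore", 1 // one 8-byte store
	case align%4 == 0:
		return "MOVWstore", 2 // two 4-byte stores
	case align%2 == 0:
		return "MOVHstore", 4 // four 2-byte stores
	default:
		return "", 0 // left to the remaining Zero rules
	}
}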
@@ -9551,8 +9553,8 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [3] ptr mem)
// cond:
// result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))
@@ -9565,19 +9567,19 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v1.AuxInt = 1
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9586,7 +9588,7 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [6] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))
for {
if v.AuxInt != 6 {
@@ -9595,25 +9597,25 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9622,7 +9624,7 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [12] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)))
for {
if v.AuxInt != 12 {
@@ -9631,25 +9633,25 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9658,7 +9660,7 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [16] {t} ptr mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(*types.Type).Alignment()%8 == 0
// result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))
for {
if v.AuxInt != 16 {
@@ -9667,19 +9669,19 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(*types.Type).Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -9687,7 +9689,7 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [24] {t} ptr mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(*types.Type).Alignment()%8 == 0
// result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)))
for {
if v.AuxInt != 24 {
@@ -9696,25 +9698,25 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(*types.Type).Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
v.AuxInt = 16
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9723,14 +9725,14 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.(Type).Alignment()%8 == 0 && !config.noDuffDevice
+ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice
// result: (DUFFZERO [8 * (128 - int64(s/8))] ptr mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(s%8 == 0 && s > 24 && s <= 8*128 && t.(Type).Alignment()%8 == 0 && !config.noDuffDevice) {
+ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpMIPS64DUFFZERO)
@@ -9740,21 +9742,21 @@ func rewriteValueMIPS64_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 8*128 || config.noDuffDevice) || t.(Type).Alignment()%8 != 0
- // result: (LoweredZero [t.(Type).Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0
+ // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !((s > 8*128 || config.noDuffDevice) || t.(Type).Alignment()%8 != 0) {
+ if !((s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0) {
break
}
v.reset(OpMIPS64LoweredZero)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(*types.Type).Alignment()
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
v0.AddArg(ptr)
v.AddArg(v0)
v.AddArg(mem)
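The last two Zero rules above split large sizes between Duff's device and a runtime loop: DUFFZERO covers 8-byte-aligned sizes s with s%8 == 0 and 24 < s <= 8*128, entering the zeroing routine 8*(128 - s/8) bytes from its top (presumably eight bytes of code per 8-byte store) so that exactly s/8 stores execute; anything larger or under-aligned falls back to LoweredZero. A tiny sketch of the offset arithmetic:

package duffsem

// duffZeroOffset mirrors the AuxInt computed by the DUFFZERO rule above.
func duffZeroOffset(s int64) int64 {
	return 8 * (128 - s/8)
}

// duffZeroOffset(1024) == 0   (enter at the top; all 128 stores run)
// duffZeroOffset(32)   == 992 (skip 124 stores; the last 4 run)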
@@ -9833,8 +9835,8 @@ func rewriteBlockMIPS64(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockMIPS64EQ:
// match: (EQ (FPFlagTrue cmp) yes no)
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 70f8de1791..e3eab943d7 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValuePPC64(v *Value) bool {
switch v.Op {
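The header hunk above adds the cmd/compile/internal/types import together with a blank-identifier reference, the same trick already applied to math, obj, and objabi: the generator always emits the import, and the throwaway var keeps the file legal even when none of its rules happen to mention the package. A standalone illustration of the idiom (hypothetical package name):

package gen

import "math"

// An import must be used in Go, so generated files pin each import
// with one blank-identifier reference instead of tracking usage.
var _ = math.MinInt8 // in case not otherwise used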
@@ -1156,15 +1158,15 @@ func rewriteValuePPC64_OpConvert_0(v *Value) bool {
func rewriteValuePPC64_OpCvt32Fto32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Cvt32Fto32 x)
// cond:
// result: (Xf2i64 (FCTIWZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
- v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1173,15 +1175,15 @@ func rewriteValuePPC64_OpCvt32Fto32_0(v *Value) bool {
func rewriteValuePPC64_OpCvt32Fto64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Cvt32Fto64 x)
// cond:
// result: (Xf2i64 (FCTIDZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
- v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1202,17 +1204,17 @@ func rewriteValuePPC64_OpCvt32Fto64F_0(v *Value) bool {
func rewriteValuePPC64_OpCvt32to32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Cvt32to32F x)
// cond:
// result: (FRSP (FCFID (Xi2f64 (SignExt32to64 x))))
for {
x := v.Args[0]
v.reset(OpPPC64FRSP)
- v0 := b.NewValue0(v.Pos, OpPPC64FCFID, types.Float64)
- v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, types.Float64)
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCFID, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v2.AddArg(x)
v1.AddArg(v2)
v0.AddArg(v1)
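On PPC64 there is no single int32-to-float32 conversion, so the Cvt32to32F rule above chains four steps: sign-extend to 64 bits, move the integer bits into an FP register (Xi2f64), convert to double (FCFID), and round to single (FRSP). Reading the ops that way is an assumption from their names; semantically the rule computes the plain-Go pipeline below, which is exact because every int32 is representable in float64.

package cvtsem

func cvt32to32F(x int32) float32 {
	wide := int64(x)   // SignExt32to64: widen the operand first
	d := float64(wide) // Xi2f64 + FCFID: integer bits in, float64 value out
	return float32(d)  // FRSP: round the double down to single precision
}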
@@ -1223,16 +1225,16 @@ func rewriteValuePPC64_OpCvt32to32F_0(v *Value) bool {
func rewriteValuePPC64_OpCvt32to64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Cvt32to64F x)
// cond:
// result: (FCFID (Xi2f64 (SignExt32to64 x)))
for {
x := v.Args[0]
v.reset(OpPPC64FCFID)
- v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, types.Float64)
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
@@ -1242,15 +1244,15 @@ func rewriteValuePPC64_OpCvt32to64F_0(v *Value) bool {
func rewriteValuePPC64_OpCvt64Fto32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Cvt64Fto32 x)
// cond:
// result: (Xf2i64 (FCTIWZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
- v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1270,15 +1272,15 @@ func rewriteValuePPC64_OpCvt64Fto32F_0(v *Value) bool {
func rewriteValuePPC64_OpCvt64Fto64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Cvt64Fto64 x)
// cond:
// result: (Xf2i64 (FCTIDZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
- v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1287,16 +1289,16 @@ func rewriteValuePPC64_OpCvt64Fto64_0(v *Value) bool {
func rewriteValuePPC64_OpCvt64to32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Cvt64to32F x)
// cond:
// result: (FRSP (FCFID (Xi2f64 x)))
for {
x := v.Args[0]
v.reset(OpPPC64FRSP)
- v0 := b.NewValue0(v.Pos, OpPPC64FCFID, types.Float64)
- v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCFID, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, typ.Float64)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
@@ -1306,15 +1308,15 @@ func rewriteValuePPC64_OpCvt64to32F_0(v *Value) bool {
func rewriteValuePPC64_OpCvt64to64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Cvt64to64F x)
// cond:
// result: (FCFID (Xi2f64 x))
for {
x := v.Args[0]
v.reset(OpPPC64FCFID)
- v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, typ.Float64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1323,8 +1325,8 @@ func rewriteValuePPC64_OpCvt64to64F_0(v *Value) bool {
func rewriteValuePPC64_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 x y)
// cond:
// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
@@ -1332,10 +1334,10 @@ func rewriteValuePPC64_OpDiv16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1344,8 +1346,8 @@ func rewriteValuePPC64_OpDiv16_0(v *Value) bool {
func rewriteValuePPC64_OpDiv16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u x y)
// cond:
// result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
@@ -1353,10 +1355,10 @@ func rewriteValuePPC64_OpDiv16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVWU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
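
PPC64 has no 16-bit divide instruction, so Div16 and Div16u widen both operands to 32 bits — sign-extending for the signed form, zero-extending for the unsigned one — and reuse DIVW/DIVWU. In plain Go, the semantics these two rules implement look roughly like this (a model of the lowering, not compiler code):

// Signed: widen with sign extension, divide at 32 bits, truncate.
func div16(x, y int16) int16 { return int16(int32(x) / int32(y)) }

// Unsigned: widen with zero extension instead.
func div16u(x, y uint16) uint16 { return uint16(uint32(x) / uint32(y)) }
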
@@ -1443,8 +1445,8 @@ func rewriteValuePPC64_OpDiv64u_0(v *Value) bool {
func rewriteValuePPC64_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
@@ -1452,10 +1454,10 @@ func rewriteValuePPC64_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1464,8 +1466,8 @@ func rewriteValuePPC64_OpDiv8_0(v *Value) bool {
func rewriteValuePPC64_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -1473,10 +1475,10 @@ func rewriteValuePPC64_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVWU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1485,8 +1487,8 @@ func rewriteValuePPC64_OpDiv8u_0(v *Value) bool {
func rewriteValuePPC64_OpEq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq16 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -1497,11 +1499,11 @@ func rewriteValuePPC64_OpEq16_0(v *Value) bool {
break
}
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1514,11 +1516,11 @@ func rewriteValuePPC64_OpEq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
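
Eq16 needs two rules because equality depends only on the low 16 bits: either extension is sound as long as both operands get the same one. The first rule keeps sign extension when both inputs are statically signed (the isSigned condition); the fallback zero-extends. A Go model of why both forms agree:

// Either widening preserves the low 16 bits, which is all
// a 16-bit equality test needs.
func eq16Signed(x, y int16) bool { return int32(x) == int32(y) }
func eq16Other(x, y uint16) bool { return uint32(x) == uint32(y) }
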
@@ -1535,7 +1537,7 @@ func rewriteValuePPC64_OpEq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1552,7 +1554,7 @@ func rewriteValuePPC64_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1569,7 +1571,7 @@ func rewriteValuePPC64_OpEq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1586,7 +1588,7 @@ func rewriteValuePPC64_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1596,8 +1598,8 @@ func rewriteValuePPC64_OpEq64F_0(v *Value) bool {
func rewriteValuePPC64_OpEq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq8 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -1608,11 +1610,11 @@ func rewriteValuePPC64_OpEq8_0(v *Value) bool {
break
}
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1625,11 +1627,11 @@ func rewriteValuePPC64_OpEq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1639,8 +1641,8 @@ func rewriteValuePPC64_OpEq8_0(v *Value) bool {
func rewriteValuePPC64_OpEqB_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqB x y)
// cond:
// result: (ANDconst [1] (EQV x y))
@@ -1649,7 +1651,7 @@ func rewriteValuePPC64_OpEqB_0(v *Value) bool {
y := v.Args[1]
v.reset(OpPPC64ANDconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpPPC64EQV, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
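
EqB compares booleans, which are materialized as 0 or 1. EQV is PPC64's bitwise equivalence (the complement of XOR), so masking its bit 0 with ANDconst [1] yields 1 exactly when the two booleans match. As a one-line model (illustrative, not the generated code):

// ^(x^y) has bit 0 set iff x and y agree in bit 0.
func eqb(x, y uint64) uint64 { return ^(x ^ y) & 1 }
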
@@ -1666,7 +1668,7 @@ func rewriteValuePPC64_OpEqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64Equal)
- v0 := b.NewValue0(v.Pos, OpPPC64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1676,8 +1678,8 @@ func rewriteValuePPC64_OpEqPtr_0(v *Value) bool {
func rewriteValuePPC64_OpGeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -1685,11 +1687,11 @@ func rewriteValuePPC64_OpGeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1699,8 +1701,8 @@ func rewriteValuePPC64_OpGeq16_0(v *Value) bool {
func rewriteValuePPC64_OpGeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16U x y)
// cond:
// result: (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -1708,11 +1710,11 @@ func rewriteValuePPC64_OpGeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1729,7 +1731,7 @@ func rewriteValuePPC64_OpGeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1746,7 +1748,7 @@ func rewriteValuePPC64_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1763,7 +1765,7 @@ func rewriteValuePPC64_OpGeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1780,7 +1782,7 @@ func rewriteValuePPC64_OpGeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1797,7 +1799,7 @@ func rewriteValuePPC64_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FGreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1814,7 +1816,7 @@ func rewriteValuePPC64_OpGeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1824,8 +1826,8 @@ func rewriteValuePPC64_OpGeq64U_0(v *Value) bool {
func rewriteValuePPC64_OpGeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -1833,11 +1835,11 @@ func rewriteValuePPC64_OpGeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1847,8 +1849,8 @@ func rewriteValuePPC64_OpGeq8_0(v *Value) bool {
func rewriteValuePPC64_OpGeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8U x y)
// cond:
// result: (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -1856,11 +1858,11 @@ func rewriteValuePPC64_OpGeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1879,8 +1881,8 @@ func rewriteValuePPC64_OpGetClosurePtr_0(v *Value) bool {
func rewriteValuePPC64_OpGreater16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -1888,11 +1890,11 @@ func rewriteValuePPC64_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1902,8 +1904,8 @@ func rewriteValuePPC64_OpGreater16_0(v *Value) bool {
func rewriteValuePPC64_OpGreater16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16U x y)
// cond:
// result: (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -1911,11 +1913,11 @@ func rewriteValuePPC64_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1932,7 +1934,7 @@ func rewriteValuePPC64_OpGreater32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1949,7 +1951,7 @@ func rewriteValuePPC64_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FGreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1966,7 +1968,7 @@ func rewriteValuePPC64_OpGreater32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1983,7 +1985,7 @@ func rewriteValuePPC64_OpGreater64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2000,7 +2002,7 @@ func rewriteValuePPC64_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FGreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2017,7 +2019,7 @@ func rewriteValuePPC64_OpGreater64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2027,8 +2029,8 @@ func rewriteValuePPC64_OpGreater64U_0(v *Value) bool {
func rewriteValuePPC64_OpGreater8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -2036,11 +2038,11 @@ func rewriteValuePPC64_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2050,8 +2052,8 @@ func rewriteValuePPC64_OpGreater8_0(v *Value) bool {
func rewriteValuePPC64_OpGreater8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8U x y)
// cond:
// result: (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -2059,11 +2061,11 @@ func rewriteValuePPC64_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2147,7 +2149,7 @@ func rewriteValuePPC64_OpIsInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -2163,7 +2165,7 @@ func rewriteValuePPC64_OpIsNonNil_0(v *Value) bool {
for {
ptr := v.Args[0]
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(ptr)
v.AddArg(v0)
@@ -2180,7 +2182,7 @@ func rewriteValuePPC64_OpIsSliceInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -2190,8 +2192,8 @@ func rewriteValuePPC64_OpIsSliceInBounds_0(v *Value) bool {
func rewriteValuePPC64_OpLeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16 x y)
// cond:
// result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -2199,11 +2201,11 @@ func rewriteValuePPC64_OpLeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2213,8 +2215,8 @@ func rewriteValuePPC64_OpLeq16_0(v *Value) bool {
func rewriteValuePPC64_OpLeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16U x y)
// cond:
// result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -2222,11 +2224,11 @@ func rewriteValuePPC64_OpLeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2243,7 +2245,7 @@ func rewriteValuePPC64_OpLeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2260,7 +2262,7 @@ func rewriteValuePPC64_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FLessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2277,7 +2279,7 @@ func rewriteValuePPC64_OpLeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2294,7 +2296,7 @@ func rewriteValuePPC64_OpLeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2311,7 +2313,7 @@ func rewriteValuePPC64_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FLessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2328,7 +2330,7 @@ func rewriteValuePPC64_OpLeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2338,8 +2340,8 @@ func rewriteValuePPC64_OpLeq64U_0(v *Value) bool {
func rewriteValuePPC64_OpLeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8 x y)
// cond:
// result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -2347,11 +2349,11 @@ func rewriteValuePPC64_OpLeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2361,8 +2363,8 @@ func rewriteValuePPC64_OpLeq8_0(v *Value) bool {
func rewriteValuePPC64_OpLeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8U x y)
// cond:
// result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -2370,11 +2372,11 @@ func rewriteValuePPC64_OpLeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2384,8 +2386,8 @@ func rewriteValuePPC64_OpLeq8U_0(v *Value) bool {
func rewriteValuePPC64_OpLess16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16 x y)
// cond:
// result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -2393,11 +2395,11 @@ func rewriteValuePPC64_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2407,8 +2409,8 @@ func rewriteValuePPC64_OpLess16_0(v *Value) bool {
func rewriteValuePPC64_OpLess16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16U x y)
// cond:
// result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -2416,11 +2418,11 @@ func rewriteValuePPC64_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2437,7 +2439,7 @@ func rewriteValuePPC64_OpLess32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2454,7 +2456,7 @@ func rewriteValuePPC64_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FLessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2471,7 +2473,7 @@ func rewriteValuePPC64_OpLess32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2488,7 +2490,7 @@ func rewriteValuePPC64_OpLess64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2505,7 +2507,7 @@ func rewriteValuePPC64_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FLessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2522,7 +2524,7 @@ func rewriteValuePPC64_OpLess64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2532,8 +2534,8 @@ func rewriteValuePPC64_OpLess64U_0(v *Value) bool {
func rewriteValuePPC64_OpLess8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8 x y)
// cond:
// result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -2541,11 +2543,11 @@ func rewriteValuePPC64_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2555,8 +2557,8 @@ func rewriteValuePPC64_OpLess8_0(v *Value) bool {
func rewriteValuePPC64_OpLess8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8U x y)
// cond:
// result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -2564,11 +2566,11 @@ func rewriteValuePPC64_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64LessThan)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2578,8 +2580,8 @@ func rewriteValuePPC64_OpLess8U_0(v *Value) bool {
func rewriteValuePPC64_OpLoad_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Load <t> ptr mem)
// cond: (is64BitInt(t) || isPtr(t))
// result: (MOVDload ptr mem)
@@ -2681,7 +2683,7 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool {
break
}
v.reset(OpPPC64MOVBreg)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
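
The Load rules map each Go type to the natural PPC64 load; the interesting case above is the signed byte. lbz zero-extends and the ISA has no sign-extending byte load (unlike lha for halfwords), so a signed int8 load becomes MOVBreg (sign-extend in register) wrapped around a zero-extending MOVBZload. The Go-level equivalent of that pair:

// Load unsigned, then reinterpret: the conversion performs the
// sign extension that MOVBreg supplies after MOVBZload.
func loadInt8(b uint8) int8 { return int8(b) }
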
@@ -2737,22 +2739,22 @@ func rewriteValuePPC64_OpLoad_0(v *Value) bool {
func rewriteValuePPC64_OpLsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x16 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -16
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
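
The Lsh rules that follow all encode Go's shift semantics — a count of at least the operand width must yield 0 — without a branch. The idea, modulo the exact carry polarity: ADDconstForCarry [-16] records in the carry whether the count reaches 16, MaskIfNotCarry turns that into an all-zeros or all-ones mask, and ORN (y | ^mask) leaves an in-range count unchanged but turns an oversized one into all ones, a count SLW treats as shifting everything out. What the branchless sequence computes, written with an explicit branch:

func lsh16x16(x int16, s uint16) int16 {
	// The compiled code must produce 0 for s >= 16, matching Go's
	// shift semantics; the branch just makes that explicit.
	if s >= 16 {
		return 0
	}
	return x << s
}
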
@@ -2764,8 +2766,8 @@ func rewriteValuePPC64_OpLsh16x16_0(v *Value) bool {
func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x32 x (Const64 [c]))
// cond: uint32(c) < 16
// result: (SLWconst x [c])
@@ -2804,18 +2806,18 @@ func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool {
}
// match: (Lsh16x32 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -16
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -2827,8 +2829,8 @@ func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool {
func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SLWconst x [c])
@@ -2883,16 +2885,16 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool {
}
// match: (Lsh16x64 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -16
v2.AddArg(y)
v1.AddArg(v2)
@@ -2904,22 +2906,22 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool {
func rewriteValuePPC64_OpLsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x8 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -16
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -2931,22 +2933,22 @@ func rewriteValuePPC64_OpLsh16x8_0(v *Value) bool {
func rewriteValuePPC64_OpLsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x16 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -2958,8 +2960,8 @@ func rewriteValuePPC64_OpLsh32x16_0(v *Value) bool {
func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x32 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SLWconst x [c])
@@ -2998,18 +3000,18 @@ func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool {
}
// match: (Lsh32x32 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3021,8 +3023,8 @@ func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool {
func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SLWconst x [c])
@@ -3077,16 +3079,16 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool {
}
// match: (Lsh32x64 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
v1.AddArg(v2)
@@ -3098,22 +3100,22 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool {
func rewriteValuePPC64_OpLsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x8 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3125,22 +3127,22 @@ func rewriteValuePPC64_OpLsh32x8_0(v *Value) bool {
func rewriteValuePPC64_OpLsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x16 x y)
// cond:
- // result: (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ // result: (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3152,8 +3154,8 @@ func rewriteValuePPC64_OpLsh64x16_0(v *Value) bool {
func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x32 x (Const64 [c]))
// cond: uint32(c) < 64
// result: (SLDconst x [c])
@@ -3192,18 +3194,18 @@ func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool {
}
// match: (Lsh64x32 x y)
// cond:
- // result: (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ // result: (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3215,8 +3217,8 @@ func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool {
func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x64 x (Const64 [c]))
// cond: uint64(c) < 64
// result: (SLDconst x [c])
@@ -3271,16 +3273,16 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool {
}
// match: (Lsh64x64 x y)
// cond:
- // result: (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ // result: (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
v1.AddArg(v2)
@@ -3292,22 +3294,22 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool {
func rewriteValuePPC64_OpLsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x8 x y)
// cond:
- // result: (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ // result: (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3319,22 +3321,22 @@ func rewriteValuePPC64_OpLsh64x8_0(v *Value) bool {
func rewriteValuePPC64_OpLsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x16 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -8
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3346,8 +3348,8 @@ func rewriteValuePPC64_OpLsh8x16_0(v *Value) bool {
func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x32 x (Const64 [c]))
// cond: uint32(c) < 8
// result: (SLWconst x [c])
@@ -3386,18 +3388,18 @@ func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool {
}
// match: (Lsh8x32 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -8
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3409,8 +3411,8 @@ func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool {
func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SLWconst x [c])
@@ -3465,16 +3467,16 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool {
}
// match: (Lsh8x64 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -8
v2.AddArg(y)
v1.AddArg(v2)
@@ -3486,22 +3488,22 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool {
func rewriteValuePPC64_OpLsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x8 x y)
// cond:
- // result: (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ // result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -8
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3513,8 +3515,8 @@ func rewriteValuePPC64_OpLsh8x8_0(v *Value) bool {
func rewriteValuePPC64_OpMod16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16 x y)
// cond:
// result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
@@ -3522,10 +3524,10 @@ func rewriteValuePPC64_OpMod16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -3534,8 +3536,8 @@ func rewriteValuePPC64_OpMod16_0(v *Value) bool {
func rewriteValuePPC64_OpMod16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16u x y)
// cond:
// result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
@@ -3543,10 +3545,10 @@ func rewriteValuePPC64_OpMod16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -3555,8 +3557,8 @@ func rewriteValuePPC64_OpMod16u_0(v *Value) bool {
func rewriteValuePPC64_OpMod32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32 x y)
// cond:
// result: (SUB x (MULLW y (DIVW x y)))
@@ -3565,9 +3567,9 @@ func rewriteValuePPC64_OpMod32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64MULLW, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64DIVW, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVW, typ.Int32)
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
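
PPC64 (before POWER9) has no remainder instruction, so Mod32 — and the Mod32u/Mod64/Mod64u rules that follow — falls back on the identity r = x - y*(x/y), which holds for Go's truncated division. As a model:

// Remainder from divide, multiply, subtract; wrapping arithmetic
// keeps even the x = MinInt32, y = -1 corner consistent (r = 0).
func mod32(x, y int32) int32 { return x - y*(x/y) }
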
@@ -3578,8 +3580,8 @@ func rewriteValuePPC64_OpMod32_0(v *Value) bool {
func rewriteValuePPC64_OpMod32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32u x y)
// cond:
// result: (SUB x (MULLW y (DIVWU x y)))
@@ -3588,9 +3590,9 @@ func rewriteValuePPC64_OpMod32u_0(v *Value) bool {
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64MULLW, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, typ.Int32)
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
@@ -3601,8 +3603,8 @@ func rewriteValuePPC64_OpMod32u_0(v *Value) bool {
func rewriteValuePPC64_OpMod64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod64 x y)
// cond:
// result: (SUB x (MULLD y (DIVD x y)))
@@ -3611,9 +3613,9 @@ func rewriteValuePPC64_OpMod64_0(v *Value) bool {
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64MULLD, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64DIVD, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVD, typ.Int64)
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
@@ -3624,8 +3626,8 @@ func rewriteValuePPC64_OpMod64_0(v *Value) bool {
func rewriteValuePPC64_OpMod64u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod64u x y)
// cond:
// result: (SUB x (MULLD y (DIVDU x y)))
@@ -3634,9 +3636,9 @@ func rewriteValuePPC64_OpMod64u_0(v *Value) bool {
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64MULLD, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, typ.Int64)
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
@@ -3647,8 +3649,8 @@ func rewriteValuePPC64_OpMod64u_0(v *Value) bool {
func rewriteValuePPC64_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
@@ -3656,10 +3658,10 @@ func rewriteValuePPC64_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -3668,8 +3670,8 @@ func rewriteValuePPC64_OpMod8_0(v *Value) bool {
func rewriteValuePPC64_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -3677,10 +3679,10 @@ func rewriteValuePPC64_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -3689,8 +3691,8 @@ func rewriteValuePPC64_OpMod8u_0(v *Value) bool {
func rewriteValuePPC64_OpMove_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -3716,7 +3718,7 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpPPC64MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -3735,7 +3737,7 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpPPC64MOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -3754,7 +3756,7 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpPPC64MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -3762,7 +3764,7 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVDstore dst (MOVDload src mem) mem)
for {
if v.AuxInt != 8 {
@@ -3772,12 +3774,12 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
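
Here the interface removal shows up outside a type argument: the aux type on Move was asserted to the old ssa.Type interface and is now asserted to the concrete *types.Type. The condition itself is unchanged — an 8-byte copy collapses to a single MOVDload/MOVDstore pair only when the element type guarantees at least 4-byte alignment. A sketch of the check, assuming cmd/compile/internal/types is imported:

func movdOK(t *types.Type) bool {
	// Alignment reports the type's alignment in bytes; a multiple
	// of 4 is what this Move rule requires for the 8-byte form.
	return t.Alignment()%4 == 0
}
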
@@ -3797,14 +3799,14 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
v.reset(OpPPC64MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -3825,14 +3827,14 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
v.reset(OpPPC64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, typ.Int16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -3853,14 +3855,14 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
v.reset(OpPPC64MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -3881,14 +3883,14 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
v.reset(OpPPC64MOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -3909,22 +3911,22 @@ func rewriteValuePPC64_OpMove_0(v *Value) bool {
v.reset(OpPPC64MOVBstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -4103,8 +4105,8 @@ func rewriteValuePPC64_OpNeg8_0(v *Value) bool {
func rewriteValuePPC64_OpNeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq16 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
@@ -4115,11 +4117,11 @@ func rewriteValuePPC64_OpNeq16_0(v *Value) bool {
break
}
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -4132,11 +4134,11 @@ func rewriteValuePPC64_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -4153,7 +4155,7 @@ func rewriteValuePPC64_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -4170,7 +4172,7 @@ func rewriteValuePPC64_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -4187,7 +4189,7 @@ func rewriteValuePPC64_OpNeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -4204,7 +4206,7 @@ func rewriteValuePPC64_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -4214,8 +4216,8 @@ func rewriteValuePPC64_OpNeq64F_0(v *Value) bool {
func rewriteValuePPC64_OpNeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq8 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
@@ -4226,11 +4228,11 @@ func rewriteValuePPC64_OpNeq8_0(v *Value) bool {
break
}
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -4243,11 +4245,11 @@ func rewriteValuePPC64_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -4277,7 +4279,7 @@ func rewriteValuePPC64_OpNeqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
- v0 := b.NewValue0(v.Pos, OpPPC64CMP, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -4312,16 +4314,16 @@ func rewriteValuePPC64_OpNot_0(v *Value) bool {
func rewriteValuePPC64_OpOffPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OffPtr [off] ptr)
// cond:
- // result: (ADD (MOVDconst <types.Int64> [off]) ptr)
+ // result: (ADD (MOVDconst <typ.Int64> [off]) ptr)
for {
off := v.AuxInt
ptr := v.Args[0]
v.reset(OpPPC64ADD)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)
@@ -4946,7 +4948,7 @@ func rewriteValuePPC64_OpPPC64CMP_0(v *Value) bool {
break
}
v.reset(OpPPC64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(y)
v.AddArg(v0)
@@ -4989,7 +4991,7 @@ func rewriteValuePPC64_OpPPC64CMPU_0(v *Value) bool {
break
}
v.reset(OpPPC64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPUconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(y)
v.AddArg(v0)
@@ -5113,7 +5115,7 @@ func rewriteValuePPC64_OpPPC64CMPW_0(v *Value) bool {
break
}
v.reset(OpPPC64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(y)
v.AddArg(v0)
@@ -5186,7 +5188,7 @@ func rewriteValuePPC64_OpPPC64CMPWU_0(v *Value) bool {
break
}
v.reset(OpPPC64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(y)
v.AddArg(v0)
@@ -7967,24 +7969,24 @@ func rewriteValuePPC64_OpRound64F_0(v *Value) bool {
func rewriteValuePPC64_OpRsh16Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux16 x y)
// cond:
- // result: (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ // result: (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -7996,8 +7998,8 @@ func rewriteValuePPC64_OpRsh16Ux16_0(v *Value) bool {
func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux32 x (Const64 [c]))
// cond: uint32(c) < 16
// result: (SRWconst (ZeroExt16to32 x) [c])
@@ -8013,7 +8015,7 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool {
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -8033,27 +8035,27 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool {
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16Ux32 x y)
// cond:
- // result: (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ // result: (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -8065,8 +8067,8 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool {
func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SRWconst (ZeroExt16to32 x) [c])
@@ -8082,7 +8084,7 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool {
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -8118,25 +8120,25 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool {
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16Ux64 x y)
// cond:
- // result: (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ // result: (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -16
v3.AddArg(y)
v2.AddArg(v3)
@@ -8148,24 +8150,24 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool {
func rewriteValuePPC64_OpRsh16Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux8 x y)
// cond:
- // result: (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ // result: (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -8177,24 +8179,24 @@ func rewriteValuePPC64_OpRsh16Ux8_0(v *Value) bool {
func rewriteValuePPC64_OpRsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x16 x y)
// cond:
- // result: (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ // result: (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -8206,8 +8208,8 @@ func rewriteValuePPC64_OpRsh16x16_0(v *Value) bool {
func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x32 x (Const64 [c]))
// cond: uint32(c) < 16
// result: (SRAWconst (SignExt16to32 x) [c])
@@ -8223,7 +8225,7 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -8243,27 +8245,27 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16x32 x y)
// cond:
- // result: (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ // result: (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -8275,8 +8277,8 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool {
func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SRAWconst (SignExt16to32 x) [c])
@@ -8292,7 +8294,7 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -8312,7 +8314,7 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -8332,25 +8334,25 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16x64 x y)
// cond:
- // result: (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ // result: (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -16
v3.AddArg(y)
v2.AddArg(v3)
@@ -8362,24 +8364,24 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool {
func rewriteValuePPC64_OpRsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x8 x y)
// cond:
- // result: (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ // result: (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -8391,22 +8393,22 @@ func rewriteValuePPC64_OpRsh16x8_0(v *Value) bool {
func rewriteValuePPC64_OpRsh32Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux16 x y)
// cond:
- // result: (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ // result: (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8418,8 +8420,8 @@ func rewriteValuePPC64_OpRsh32Ux16_0(v *Value) bool {
func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux32 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SRWconst x [c])
@@ -8458,18 +8460,18 @@ func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool {
}
// match: (Rsh32Ux32 x y)
// cond:
- // result: (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ // result: (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8481,8 +8483,8 @@ func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool {
func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SRWconst x [c])
@@ -8537,16 +8539,16 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool {
}
// match: (Rsh32Ux64 x y)
// cond:
- // result: (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ // result: (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
v1.AddArg(v2)
@@ -8558,22 +8560,22 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool {
func rewriteValuePPC64_OpRsh32Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux8 x y)
// cond:
- // result: (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ // result: (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8585,22 +8587,22 @@ func rewriteValuePPC64_OpRsh32Ux8_0(v *Value) bool {
func rewriteValuePPC64_OpRsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x16 x y)
// cond:
- // result: (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ // result: (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8612,8 +8614,8 @@ func rewriteValuePPC64_OpRsh32x16_0(v *Value) bool {
func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x32 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SRAWconst x [c])
@@ -8652,18 +8654,18 @@ func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool {
}
// match: (Rsh32x32 x y)
// cond:
- // result: (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ // result: (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8675,8 +8677,8 @@ func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool {
func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SRAWconst x [c])
@@ -8733,16 +8735,16 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool {
}
// match: (Rsh32x64 x y)
// cond:
- // result: (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ // result: (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
v1.AddArg(v2)
@@ -8754,22 +8756,22 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool {
func rewriteValuePPC64_OpRsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x8 x y)
// cond:
- // result: (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ // result: (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8781,22 +8783,22 @@ func rewriteValuePPC64_OpRsh32x8_0(v *Value) bool {
func rewriteValuePPC64_OpRsh64Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux16 x y)
// cond:
- // result: (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ // result: (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8808,8 +8810,8 @@ func rewriteValuePPC64_OpRsh64Ux16_0(v *Value) bool {
func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux32 x (Const64 [c]))
// cond: uint32(c) < 64
// result: (SRDconst x [c])
@@ -8848,18 +8850,18 @@ func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool {
}
// match: (Rsh64Ux32 x y)
// cond:
- // result: (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ // result: (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8871,8 +8873,8 @@ func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool {
func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux64 x (Const64 [c]))
// cond: uint64(c) < 64
// result: (SRDconst x [c])
@@ -8927,16 +8929,16 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool {
}
// match: (Rsh64Ux64 x y)
// cond:
- // result: (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ // result: (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
v1.AddArg(v2)
@@ -8948,22 +8950,22 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool {
func rewriteValuePPC64_OpRsh64Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux8 x y)
// cond:
- // result: (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ // result: (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -8975,22 +8977,22 @@ func rewriteValuePPC64_OpRsh64Ux8_0(v *Value) bool {
func rewriteValuePPC64_OpRsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x16 x y)
// cond:
- // result: (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ // result: (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -9002,8 +9004,8 @@ func rewriteValuePPC64_OpRsh64x16_0(v *Value) bool {
func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x32 x (Const64 [c]))
// cond: uint32(c) < 64
// result: (SRADconst x [c])
@@ -9042,18 +9044,18 @@ func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool {
}
// match: (Rsh64x32 x y)
// cond:
- // result: (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ // result: (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -9065,8 +9067,8 @@ func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool {
func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x64 x (Const64 [c]))
// cond: uint64(c) < 64
// result: (SRADconst x [c])
@@ -9123,16 +9125,16 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool {
}
// match: (Rsh64x64 x y)
// cond:
- // result: (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ // result: (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
v1.AddArg(v2)
@@ -9144,22 +9146,22 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool {
func rewriteValuePPC64_OpRsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x8 x y)
// cond:
- // result: (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ // result: (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -9171,24 +9173,24 @@ func rewriteValuePPC64_OpRsh64x8_0(v *Value) bool {
func rewriteValuePPC64_OpRsh8Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux16 x y)
// cond:
- // result: (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ // result: (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -9200,8 +9202,8 @@ func rewriteValuePPC64_OpRsh8Ux16_0(v *Value) bool {
func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux32 x (Const64 [c]))
// cond: uint32(c) < 8
// result: (SRWconst (ZeroExt8to32 x) [c])
@@ -9217,7 +9219,7 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool {
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -9237,27 +9239,27 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool {
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8Ux32 x y)
// cond:
- // result: (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ // result: (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -9269,8 +9271,8 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool {
func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SRWconst (ZeroExt8to32 x) [c])
@@ -9286,7 +9288,7 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool {
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -9322,25 +9324,25 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool {
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8Ux64 x y)
// cond:
- // result: (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ // result: (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -8
v3.AddArg(y)
v2.AddArg(v3)
@@ -9352,24 +9354,24 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool {
func rewriteValuePPC64_OpRsh8Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux8 x y)
// cond:
- // result: (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ // result: (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -9381,24 +9383,24 @@ func rewriteValuePPC64_OpRsh8Ux8_0(v *Value) bool {
func rewriteValuePPC64_OpRsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x16 x y)
// cond:
- // result: (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ // result: (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -9410,8 +9412,8 @@ func rewriteValuePPC64_OpRsh8x16_0(v *Value) bool {
func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x32 x (Const64 [c]))
// cond: uint32(c) < 8
// result: (SRAWconst (SignExt8to32 x) [c])
@@ -9427,7 +9429,7 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -9447,27 +9449,27 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8x32 x y)
// cond:
- // result: (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ // result: (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -9479,8 +9481,8 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool {
func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SRAWconst (SignExt8to32 x) [c])
@@ -9496,7 +9498,7 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -9516,7 +9518,7 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -9536,25 +9538,25 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8x64 x y)
// cond:
- // result: (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ // result: (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -8
v3.AddArg(y)
v2.AddArg(v3)
@@ -9566,24 +9568,24 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool {
func rewriteValuePPC64_OpRsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x8 x y)
// cond:
- // result: (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ // result: (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, typ.Int64)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, types.Int64)
- v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, typ.Int64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, types.TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -9703,14 +9705,14 @@ func rewriteValuePPC64_OpStaticCall_0(v *Value) bool {
}
func rewriteValuePPC64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVDstore)
@@ -9720,14 +9722,14 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is32BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is32BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVDstore)
@@ -9737,14 +9739,14 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (FMOVSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVSstore)
@@ -9754,14 +9756,14 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))
+ // cond: t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))) {
+ if !(t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))) {
break
}
v.reset(OpPPC64MOVDstore)
@@ -9771,14 +9773,14 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitInt(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitInt(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitInt(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitInt(val.Type)) {
break
}
v.reset(OpPPC64MOVWstore)
@@ -9788,14 +9790,14 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(OpPPC64MOVHstore)
@@ -9805,14 +9807,14 @@ func rewriteValuePPC64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(OpPPC64MOVBstore)
@@ -10088,7 +10090,7 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool {
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = 2
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
v0.AddArg(destptr)
v0.AddArg(mem)
v.AddArg(v0)
@@ -10120,7 +10122,7 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool {
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = 4
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
v0.AddArg(destptr)
v0.AddArg(mem)
v.AddArg(v0)
@@ -10138,7 +10140,7 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool {
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = 4
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
v0.AddArg(destptr)
v0.AddArg(mem)
v.AddArg(v0)
@@ -10156,10 +10158,10 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool {
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = 6
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
v0.AuxInt = 4
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
v1.AddArg(destptr)
v1.AddArg(mem)
v0.AddArg(v1)
@@ -10192,7 +10194,7 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool {
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = 8
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -10216,7 +10218,7 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool {
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 8
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -10235,10 +10237,10 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool {
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 16
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v0.AuxInt = 8
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(destptr)
v1.AddArg(mem)
@@ -10258,13 +10260,13 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool {
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 24
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v0.AuxInt = 16
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(destptr)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v2.AuxInt = 0
v2.AddArg(destptr)
v2.AddArg(mem)
@@ -10285,16 +10287,16 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool {
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 32
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v0.AuxInt = 24
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v1.AuxInt = 16
v1.AddArg(destptr)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v2.AuxInt = 8
v2.AddArg(destptr)
- v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(destptr)
v3.AddArg(mem)
@@ -10316,19 +10318,19 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool {
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 40
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v0.AuxInt = 32
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v1.AuxInt = 24
v1.AddArg(destptr)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v2.AuxInt = 16
v2.AddArg(destptr)
- v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v3.AuxInt = 8
v3.AddArg(destptr)
- v4 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v4.AuxInt = 0
v4.AddArg(destptr)
v4.AddArg(mem)
@@ -10351,22 +10353,22 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool {
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = 48
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v0.AuxInt = 40
v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v1.AuxInt = 32
v1.AddArg(destptr)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v2.AuxInt = 24
v2.AddArg(destptr)
- v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v3.AuxInt = 16
v3.AddArg(destptr)
- v4 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v4.AuxInt = 8
v4.AddArg(destptr)
- v5 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
v5.AuxInt = 0
v5.AddArg(destptr)
v5.AddArg(mem)
@@ -10463,8 +10465,8 @@ func rewriteBlockPPC64(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockPPC64EQ:
// match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no)
@@ -10485,7 +10487,7 @@ func rewriteBlockPPC64(b *Block) bool {
c := v_0.AuxInt
x := v_0.Args[0]
b.Kind = BlockPPC64EQ
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
b.SetControl(v0)
@@ -10509,7 +10511,7 @@ func rewriteBlockPPC64(b *Block) bool {
c := v_0.AuxInt
x := v_0.Args[0]
b.Kind = BlockPPC64EQ
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
b.SetControl(v0)
@@ -10808,7 +10810,7 @@ func rewriteBlockPPC64(b *Block) bool {
_ = v
cond := b.Control
b.Kind = BlockPPC64NE
- v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
v0.AuxInt = 0
v0.AddArg(cond)
b.SetControl(v0)
@@ -11136,7 +11138,7 @@ func rewriteBlockPPC64(b *Block) bool {
c := v_0.AuxInt
x := v_0.Args[0]
b.Kind = BlockPPC64NE
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
b.SetControl(v0)
@@ -11160,7 +11162,7 @@ func rewriteBlockPPC64(b *Block) bool {
c := v_0.AuxInt
x := v_0.Args[0]
b.Kind = BlockPPC64NE
- v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
b.SetControl(v0)
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index ee3f07e904..1929f5491d 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValueS390X(v *Value) bool {
switch v.Op {
@@ -892,8 +894,8 @@ func rewriteValueS390X_OpAndB_0(v *Value) bool {
func rewriteValueS390X_OpAtomicAdd32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (AddTupleFirst32 (LAA ptr val mem) val)
@@ -902,7 +904,7 @@ func rewriteValueS390X_OpAtomicAdd32_0(v *Value) bool {
val := v.Args[1]
mem := v.Args[2]
v.reset(OpS390XAddTupleFirst32)
- v0 := b.NewValue0(v.Pos, OpS390XLAA, MakeTuple(types.UInt32, TypeMem))
+ v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem))
v0.AddArg(ptr)
v0.AddArg(val)
v0.AddArg(mem)
@@ -914,8 +916,8 @@ func rewriteValueS390X_OpAtomicAdd32_0(v *Value) bool {
func rewriteValueS390X_OpAtomicAdd64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicAdd64 ptr val mem)
// cond:
// result: (AddTupleFirst64 (LAAG ptr val mem) val)
@@ -924,7 +926,7 @@ func rewriteValueS390X_OpAtomicAdd64_0(v *Value) bool {
val := v.Args[1]
mem := v.Args[2]
v.reset(OpS390XAddTupleFirst64)
- v0 := b.NewValue0(v.Pos, OpS390XLAAG, MakeTuple(types.UInt64, TypeMem))
+ v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem))
v0.AddArg(ptr)
v0.AddArg(val)
v0.AddArg(mem)
@@ -1106,18 +1108,18 @@ func rewriteValueS390X_OpAvg64u_0(v *Value) bool {
func rewriteValueS390X_OpBitLen64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitLen64 x)
// cond:
// result: (SUB (MOVDconst [64]) (FLOGR x))
for {
x := v.Args[0]
v.reset(OpS390XSUB)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 64
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XFLOGR, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -1311,8 +1313,8 @@ func rewriteValueS390X_OpConvert_0(v *Value) bool {
func rewriteValueS390X_OpCtz32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Ctz32 <t> x)
// cond:
// result: (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
@@ -1320,11 +1322,11 @@ func rewriteValueS390X_OpCtz32_0(v *Value) bool {
t := v.Type
x := v.Args[0]
v.reset(OpS390XSUB)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 64
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XFLOGR, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v3 := b.NewValue0(v.Pos, OpS390XANDW, t)
v4 := b.NewValue0(v.Pos, OpS390XSUBWconst, t)
v4.AuxInt = 1
@@ -1342,8 +1344,8 @@ func rewriteValueS390X_OpCtz32_0(v *Value) bool {
func rewriteValueS390X_OpCtz64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Ctz64 <t> x)
// cond:
// result: (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
@@ -1351,10 +1353,10 @@ func rewriteValueS390X_OpCtz64_0(v *Value) bool {
t := v.Type
x := v.Args[0]
v.reset(OpS390XSUB)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 64
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XFLOGR, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
v2 := b.NewValue0(v.Pos, OpS390XAND, t)
v3 := b.NewValue0(v.Pos, OpS390XSUBconst, t)
v3.AuxInt = 1
@@ -1481,8 +1483,8 @@ func rewriteValueS390X_OpCvt64to64F_0(v *Value) bool {
func rewriteValueS390X_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 x y)
// cond:
// result: (DIVW (MOVHreg x) (MOVHreg y))
@@ -1490,10 +1492,10 @@ func rewriteValueS390X_OpDiv16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1502,8 +1504,8 @@ func rewriteValueS390X_OpDiv16_0(v *Value) bool {
func rewriteValueS390X_OpDiv16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u x y)
// cond:
// result: (DIVWU (MOVHZreg x) (MOVHZreg y))
@@ -1511,10 +1513,10 @@ func rewriteValueS390X_OpDiv16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1523,8 +1525,8 @@ func rewriteValueS390X_OpDiv16u_0(v *Value) bool {
func rewriteValueS390X_OpDiv32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32 x y)
// cond:
// result: (DIVW (MOVWreg x) y)
@@ -1532,7 +1534,7 @@ func rewriteValueS390X_OpDiv32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
@@ -1555,8 +1557,8 @@ func rewriteValueS390X_OpDiv32F_0(v *Value) bool {
func rewriteValueS390X_OpDiv32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32u x y)
// cond:
// result: (DIVWU (MOVWZreg x) y)
@@ -1564,7 +1566,7 @@ func rewriteValueS390X_OpDiv32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
@@ -1613,8 +1615,8 @@ func rewriteValueS390X_OpDiv64u_0(v *Value) bool {
func rewriteValueS390X_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (DIVW (MOVBreg x) (MOVBreg y))
@@ -1622,10 +1624,10 @@ func rewriteValueS390X_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1634,8 +1636,8 @@ func rewriteValueS390X_OpDiv8_0(v *Value) bool {
func rewriteValueS390X_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (DIVWU (MOVBZreg x) (MOVBZreg y))
@@ -1643,10 +1645,10 @@ func rewriteValueS390X_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1655,8 +1657,8 @@ func rewriteValueS390X_OpDiv8u_0(v *Value) bool {
func rewriteValueS390X_OpEq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq16 x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
@@ -1664,17 +1666,17 @@ func rewriteValueS390X_OpEq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -1684,8 +1686,8 @@ func rewriteValueS390X_OpEq16_0(v *Value) bool {
func rewriteValueS390X_OpEq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq32 x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
@@ -1693,13 +1695,13 @@ func rewriteValueS390X_OpEq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -1709,8 +1711,8 @@ func rewriteValueS390X_OpEq32_0(v *Value) bool {
func rewriteValueS390X_OpEq32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq32F x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
@@ -1718,13 +1720,13 @@ func rewriteValueS390X_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -1734,8 +1736,8 @@ func rewriteValueS390X_OpEq32F_0(v *Value) bool {
func rewriteValueS390X_OpEq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq64 x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
@@ -1743,13 +1745,13 @@ func rewriteValueS390X_OpEq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -1759,8 +1761,8 @@ func rewriteValueS390X_OpEq64_0(v *Value) bool {
func rewriteValueS390X_OpEq64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq64F x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
@@ -1768,13 +1770,13 @@ func rewriteValueS390X_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -1784,8 +1786,8 @@ func rewriteValueS390X_OpEq64F_0(v *Value) bool {
func rewriteValueS390X_OpEq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq8 x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
@@ -1793,17 +1795,17 @@ func rewriteValueS390X_OpEq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -1813,8 +1815,8 @@ func rewriteValueS390X_OpEq8_0(v *Value) bool {
func rewriteValueS390X_OpEqB_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqB x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
@@ -1822,17 +1824,17 @@ func rewriteValueS390X_OpEqB_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -1842,8 +1844,8 @@ func rewriteValueS390X_OpEqB_0(v *Value) bool {
func rewriteValueS390X_OpEqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqPtr x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
@@ -1851,13 +1853,13 @@ func rewriteValueS390X_OpEqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -1867,8 +1869,8 @@ func rewriteValueS390X_OpEqPtr_0(v *Value) bool {
func rewriteValueS390X_OpGeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16 x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
@@ -1876,17 +1878,17 @@ func rewriteValueS390X_OpGeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -1896,8 +1898,8 @@ func rewriteValueS390X_OpGeq16_0(v *Value) bool {
func rewriteValueS390X_OpGeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16U x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
@@ -1905,17 +1907,17 @@ func rewriteValueS390X_OpGeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -1925,8 +1927,8 @@ func rewriteValueS390X_OpGeq16U_0(v *Value) bool {
func rewriteValueS390X_OpGeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32 x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
@@ -1934,13 +1936,13 @@ func rewriteValueS390X_OpGeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -1950,8 +1952,8 @@ func rewriteValueS390X_OpGeq32_0(v *Value) bool {
func rewriteValueS390X_OpGeq32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32F x y)
// cond:
// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
@@ -1959,13 +1961,13 @@ func rewriteValueS390X_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGEnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -1975,8 +1977,8 @@ func rewriteValueS390X_OpGeq32F_0(v *Value) bool {
func rewriteValueS390X_OpGeq32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32U x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
@@ -1984,13 +1986,13 @@ func rewriteValueS390X_OpGeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2000,8 +2002,8 @@ func rewriteValueS390X_OpGeq32U_0(v *Value) bool {
func rewriteValueS390X_OpGeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq64 x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
@@ -2009,13 +2011,13 @@ func rewriteValueS390X_OpGeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2025,8 +2027,8 @@ func rewriteValueS390X_OpGeq64_0(v *Value) bool {
func rewriteValueS390X_OpGeq64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq64F x y)
// cond:
// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
@@ -2034,13 +2036,13 @@ func rewriteValueS390X_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGEnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2050,8 +2052,8 @@ func rewriteValueS390X_OpGeq64F_0(v *Value) bool {
func rewriteValueS390X_OpGeq64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq64U x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
@@ -2059,13 +2061,13 @@ func rewriteValueS390X_OpGeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2075,8 +2077,8 @@ func rewriteValueS390X_OpGeq64U_0(v *Value) bool {
func rewriteValueS390X_OpGeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8 x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
@@ -2084,17 +2086,17 @@ func rewriteValueS390X_OpGeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2104,8 +2106,8 @@ func rewriteValueS390X_OpGeq8_0(v *Value) bool {
func rewriteValueS390X_OpGeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8U x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
@@ -2113,17 +2115,17 @@ func rewriteValueS390X_OpGeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2153,8 +2155,8 @@ func rewriteValueS390X_OpGetG_0(v *Value) bool {
func rewriteValueS390X_OpGreater16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16 x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
@@ -2162,17 +2164,17 @@ func rewriteValueS390X_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2182,8 +2184,8 @@ func rewriteValueS390X_OpGreater16_0(v *Value) bool {
func rewriteValueS390X_OpGreater16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16U x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
@@ -2191,17 +2193,17 @@ func rewriteValueS390X_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2211,8 +2213,8 @@ func rewriteValueS390X_OpGreater16U_0(v *Value) bool {
func rewriteValueS390X_OpGreater32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater32 x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
@@ -2220,13 +2222,13 @@ func rewriteValueS390X_OpGreater32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2236,8 +2238,8 @@ func rewriteValueS390X_OpGreater32_0(v *Value) bool {
func rewriteValueS390X_OpGreater32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater32F x y)
// cond:
// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
@@ -2245,13 +2247,13 @@ func rewriteValueS390X_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGTnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2261,8 +2263,8 @@ func rewriteValueS390X_OpGreater32F_0(v *Value) bool {
func rewriteValueS390X_OpGreater32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater32U x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
@@ -2270,13 +2272,13 @@ func rewriteValueS390X_OpGreater32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2286,8 +2288,8 @@ func rewriteValueS390X_OpGreater32U_0(v *Value) bool {
func rewriteValueS390X_OpGreater64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater64 x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
@@ -2295,13 +2297,13 @@ func rewriteValueS390X_OpGreater64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2311,8 +2313,8 @@ func rewriteValueS390X_OpGreater64_0(v *Value) bool {
func rewriteValueS390X_OpGreater64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater64F x y)
// cond:
// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
@@ -2320,13 +2322,13 @@ func rewriteValueS390X_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGTnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2336,8 +2338,8 @@ func rewriteValueS390X_OpGreater64F_0(v *Value) bool {
func rewriteValueS390X_OpGreater64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater64U x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
@@ -2345,13 +2347,13 @@ func rewriteValueS390X_OpGreater64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2361,8 +2363,8 @@ func rewriteValueS390X_OpGreater64U_0(v *Value) bool {
func rewriteValueS390X_OpGreater8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8 x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
@@ -2370,17 +2372,17 @@ func rewriteValueS390X_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2390,8 +2392,8 @@ func rewriteValueS390X_OpGreater8_0(v *Value) bool {
func rewriteValueS390X_OpGreater8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8U x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
@@ -2399,17 +2401,17 @@ func rewriteValueS390X_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2419,8 +2421,8 @@ func rewriteValueS390X_OpGreater8U_0(v *Value) bool {
func rewriteValueS390X_OpHmul32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32 x y)
// cond:
// result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
@@ -2429,11 +2431,11 @@ func rewriteValueS390X_OpHmul32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRDconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpS390XMULLD, types.Int64)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2443,8 +2445,8 @@ func rewriteValueS390X_OpHmul32_0(v *Value) bool {
func rewriteValueS390X_OpHmul32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32u x y)
// cond:
// result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
@@ -2453,11 +2455,11 @@ func rewriteValueS390X_OpHmul32u_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRDconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpS390XMULLD, types.Int64)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2526,8 +2528,8 @@ func rewriteValueS390X_OpInterCall_0(v *Value) bool {
func rewriteValueS390X_OpIsInBounds_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsInBounds idx len)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
@@ -2535,13 +2537,13 @@ func rewriteValueS390X_OpIsInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
v2.AddArg(idx)
v2.AddArg(len)
v.AddArg(v2)
@@ -2551,21 +2553,21 @@ func rewriteValueS390X_OpIsInBounds_0(v *Value) bool {
func rewriteValueS390X_OpIsNonNil_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsNonNil p)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
for {
p := v.Args[0]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags)
v2.AuxInt = 0
v2.AddArg(p)
v.AddArg(v2)
@@ -2575,8 +2577,8 @@ func rewriteValueS390X_OpIsNonNil_0(v *Value) bool {
func rewriteValueS390X_OpIsSliceInBounds_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsSliceInBounds idx len)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
@@ -2584,13 +2586,13 @@ func rewriteValueS390X_OpIsSliceInBounds_0(v *Value) bool {
idx := v.Args[0]
len := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
v2.AddArg(idx)
v2.AddArg(len)
v.AddArg(v2)
@@ -2600,8 +2602,8 @@ func rewriteValueS390X_OpIsSliceInBounds_0(v *Value) bool {
func rewriteValueS390X_OpLeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16 x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
@@ -2609,17 +2611,17 @@ func rewriteValueS390X_OpLeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2629,8 +2631,8 @@ func rewriteValueS390X_OpLeq16_0(v *Value) bool {
func rewriteValueS390X_OpLeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16U x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
@@ -2638,17 +2640,17 @@ func rewriteValueS390X_OpLeq16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2658,8 +2660,8 @@ func rewriteValueS390X_OpLeq16U_0(v *Value) bool {
func rewriteValueS390X_OpLeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32 x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
@@ -2667,13 +2669,13 @@ func rewriteValueS390X_OpLeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2683,8 +2685,8 @@ func rewriteValueS390X_OpLeq32_0(v *Value) bool {
func rewriteValueS390X_OpLeq32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32F x y)
// cond:
// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
@@ -2692,13 +2694,13 @@ func rewriteValueS390X_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGEnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
v2.AddArg(y)
v2.AddArg(x)
v.AddArg(v2)
@@ -2708,8 +2710,8 @@ func rewriteValueS390X_OpLeq32F_0(v *Value) bool {
func rewriteValueS390X_OpLeq32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32U x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
@@ -2717,13 +2719,13 @@ func rewriteValueS390X_OpLeq32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2733,8 +2735,8 @@ func rewriteValueS390X_OpLeq32U_0(v *Value) bool {
func rewriteValueS390X_OpLeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq64 x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
@@ -2742,13 +2744,13 @@ func rewriteValueS390X_OpLeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2758,8 +2760,8 @@ func rewriteValueS390X_OpLeq64_0(v *Value) bool {
func rewriteValueS390X_OpLeq64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq64F x y)
// cond:
// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
@@ -2767,13 +2769,13 @@ func rewriteValueS390X_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGEnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
v2.AddArg(y)
v2.AddArg(x)
v.AddArg(v2)
@@ -2783,8 +2785,8 @@ func rewriteValueS390X_OpLeq64F_0(v *Value) bool {
func rewriteValueS390X_OpLeq64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq64U x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
@@ -2792,13 +2794,13 @@ func rewriteValueS390X_OpLeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2808,8 +2810,8 @@ func rewriteValueS390X_OpLeq64U_0(v *Value) bool {
func rewriteValueS390X_OpLeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8 x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
@@ -2817,17 +2819,17 @@ func rewriteValueS390X_OpLeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2837,8 +2839,8 @@ func rewriteValueS390X_OpLeq8_0(v *Value) bool {
func rewriteValueS390X_OpLeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8U x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
@@ -2846,17 +2848,17 @@ func rewriteValueS390X_OpLeq8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2866,8 +2868,8 @@ func rewriteValueS390X_OpLeq8U_0(v *Value) bool {
func rewriteValueS390X_OpLess16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16 x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
@@ -2875,17 +2877,17 @@ func rewriteValueS390X_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2895,8 +2897,8 @@ func rewriteValueS390X_OpLess16_0(v *Value) bool {
func rewriteValueS390X_OpLess16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16U x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
@@ -2904,17 +2906,17 @@ func rewriteValueS390X_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -2924,8 +2926,8 @@ func rewriteValueS390X_OpLess16U_0(v *Value) bool {
func rewriteValueS390X_OpLess32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less32 x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
@@ -2933,13 +2935,13 @@ func rewriteValueS390X_OpLess32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2949,8 +2951,8 @@ func rewriteValueS390X_OpLess32_0(v *Value) bool {
func rewriteValueS390X_OpLess32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less32F x y)
// cond:
// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
@@ -2958,13 +2960,13 @@ func rewriteValueS390X_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGTnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
v2.AddArg(y)
v2.AddArg(x)
v.AddArg(v2)
@@ -2974,8 +2976,8 @@ func rewriteValueS390X_OpLess32F_0(v *Value) bool {
func rewriteValueS390X_OpLess32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less32U x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
@@ -2983,13 +2985,13 @@ func rewriteValueS390X_OpLess32U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -2999,8 +3001,8 @@ func rewriteValueS390X_OpLess32U_0(v *Value) bool {
func rewriteValueS390X_OpLess64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less64 x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
@@ -3008,13 +3010,13 @@ func rewriteValueS390X_OpLess64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -3024,8 +3026,8 @@ func rewriteValueS390X_OpLess64_0(v *Value) bool {
func rewriteValueS390X_OpLess64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less64F x y)
// cond:
// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
@@ -3033,13 +3035,13 @@ func rewriteValueS390X_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGTnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
v2.AddArg(y)
v2.AddArg(x)
v.AddArg(v2)
@@ -3049,8 +3051,8 @@ func rewriteValueS390X_OpLess64F_0(v *Value) bool {
func rewriteValueS390X_OpLess64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less64U x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
@@ -3058,13 +3060,13 @@ func rewriteValueS390X_OpLess64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -3074,8 +3076,8 @@ func rewriteValueS390X_OpLess64U_0(v *Value) bool {
func rewriteValueS390X_OpLess8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8 x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
@@ -3083,17 +3085,17 @@ func rewriteValueS390X_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -3103,8 +3105,8 @@ func rewriteValueS390X_OpLess8_0(v *Value) bool {
func rewriteValueS390X_OpLess8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8U x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
@@ -3112,17 +3114,17 @@ func rewriteValueS390X_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -3270,8 +3272,8 @@ func rewriteValueS390X_OpLoad_0(v *Value) bool {
func rewriteValueS390X_OpLsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x16 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
@@ -3285,9 +3287,9 @@ func rewriteValueS390X_OpLsh16x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
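
The shift lowerings avoid branching on the count: SLW computes the raw shift (seeing only the count mod 64), CMPWUconst plus SUBEWcarrymask yield an all-ones mask exactly while the count is in range, and ANDW zeroes everything else. A sketch of the same semantics in plain Go, with uint32 standing in for the 32-bit W-form registers:

package main

import "fmt"

// lsh16x16 sketches (Lsh16x16 <t> x y) ->
//   (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
// SLW alone would shift by 1 when y is 65 (count taken mod 64);
// the carrymask zeroes the result for any y > 31 instead.
func lsh16x16(x uint16, y uint16) uint32 {
	raw := uint32(x) << (y & 63) // SLW: hardware shift, count mod 64
	var mask uint32
	if y <= 31 { // CMPWUconst (MOVHZreg y) [31]
		mask = ^uint32(0) // SUBEWcarrymask: all ones while in range
	}
	return raw & mask // ANDW
}

func main() {
	for _, y := range []uint16{1, 15, 31, 32, 64, 65} {
		got := lsh16x16(3, y)
		want := uint32(3) << y // Go's semantics: 0 once y >= 32
		fmt.Println(y, got, got == want)
	}
}
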
@@ -3311,7 +3313,7 @@ func rewriteValueS390X_OpLsh16x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
v2.AddArg(y)
v1.AddArg(v2)
@@ -3335,7 +3337,7 @@ func rewriteValueS390X_OpLsh16x64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v2.AuxInt = 31
v2.AddArg(y)
v1.AddArg(v2)
@@ -3346,8 +3348,8 @@ func rewriteValueS390X_OpLsh16x64_0(v *Value) bool {
func rewriteValueS390X_OpLsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x8 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
@@ -3361,9 +3363,9 @@ func rewriteValueS390X_OpLsh16x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3374,8 +3376,8 @@ func rewriteValueS390X_OpLsh16x8_0(v *Value) bool {
func rewriteValueS390X_OpLsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x16 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
@@ -3389,9 +3391,9 @@ func rewriteValueS390X_OpLsh32x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3415,7 +3417,7 @@ func rewriteValueS390X_OpLsh32x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
v2.AddArg(y)
v1.AddArg(v2)
@@ -3439,7 +3441,7 @@ func rewriteValueS390X_OpLsh32x64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v2.AuxInt = 31
v2.AddArg(y)
v1.AddArg(v2)
@@ -3450,8 +3452,8 @@ func rewriteValueS390X_OpLsh32x64_0(v *Value) bool {
func rewriteValueS390X_OpLsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x8 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
@@ -3465,9 +3467,9 @@ func rewriteValueS390X_OpLsh32x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3478,8 +3480,8 @@ func rewriteValueS390X_OpLsh32x8_0(v *Value) bool {
func rewriteValueS390X_OpLsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x16 <t> x y)
// cond:
// result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
@@ -3493,9 +3495,9 @@ func rewriteValueS390X_OpLsh64x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 63
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3519,7 +3521,7 @@ func rewriteValueS390X_OpLsh64x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 63
v2.AddArg(y)
v1.AddArg(v2)
@@ -3543,7 +3545,7 @@ func rewriteValueS390X_OpLsh64x64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v2.AuxInt = 63
v2.AddArg(y)
v1.AddArg(v2)
@@ -3554,8 +3556,8 @@ func rewriteValueS390X_OpLsh64x64_0(v *Value) bool {
func rewriteValueS390X_OpLsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x8 <t> x y)
// cond:
// result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
@@ -3569,9 +3571,9 @@ func rewriteValueS390X_OpLsh64x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 63
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3582,8 +3584,8 @@ func rewriteValueS390X_OpLsh64x8_0(v *Value) bool {
func rewriteValueS390X_OpLsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x16 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
@@ -3597,9 +3599,9 @@ func rewriteValueS390X_OpLsh8x16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3623,7 +3625,7 @@ func rewriteValueS390X_OpLsh8x32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
v2.AddArg(y)
v1.AddArg(v2)
@@ -3647,7 +3649,7 @@ func rewriteValueS390X_OpLsh8x64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v2.AuxInt = 31
v2.AddArg(y)
v1.AddArg(v2)
@@ -3658,8 +3660,8 @@ func rewriteValueS390X_OpLsh8x64_0(v *Value) bool {
func rewriteValueS390X_OpLsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x8 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
@@ -3673,9 +3675,9 @@ func rewriteValueS390X_OpLsh8x8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -3686,8 +3688,8 @@ func rewriteValueS390X_OpLsh8x8_0(v *Value) bool {
func rewriteValueS390X_OpMod16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16 x y)
// cond:
// result: (MODW (MOVHreg x) (MOVHreg y))
@@ -3695,10 +3697,10 @@ func rewriteValueS390X_OpMod16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v1.AddArg(y)
v.AddArg(v1)
return true
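
There is no 16-bit remainder instruction, so Mod16 sign-extends both operands (MOVHreg, into typ.Int64) and reuses the wider MODW; the result always fits back into 16 bits. The same idea in plain Go, with int32 standing in for the W-form operation:

package main

import "fmt"

// mod16 mirrors (Mod16 x y) -> (MODW (MOVHreg x) (MOVHreg y)):
// sign-extend both 16-bit operands, take the wider remainder,
// and narrow the result, which is always < |y| in magnitude.
func mod16(x, y int16) int16 {
	xe := int32(x) // MOVHreg: sign extension
	ye := int32(y)
	return int16(xe % ye) // MODW
}

func main() {
	fmt.Println(mod16(-7, 3), -7%3) // both print -1
}
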
@@ -3707,8 +3709,8 @@ func rewriteValueS390X_OpMod16_0(v *Value) bool {
func rewriteValueS390X_OpMod16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16u x y)
// cond:
// result: (MODWU (MOVHZreg x) (MOVHZreg y))
@@ -3716,10 +3718,10 @@ func rewriteValueS390X_OpMod16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -3728,8 +3730,8 @@ func rewriteValueS390X_OpMod16u_0(v *Value) bool {
func rewriteValueS390X_OpMod32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32 x y)
// cond:
// result: (MODW (MOVWreg x) y)
@@ -3737,7 +3739,7 @@ func rewriteValueS390X_OpMod32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
@@ -3747,8 +3749,8 @@ func rewriteValueS390X_OpMod32_0(v *Value) bool {
func rewriteValueS390X_OpMod32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32u x y)
// cond:
// result: (MODWU (MOVWZreg x) y)
@@ -3756,7 +3758,7 @@ func rewriteValueS390X_OpMod32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
@@ -3792,8 +3794,8 @@ func rewriteValueS390X_OpMod64u_0(v *Value) bool {
func rewriteValueS390X_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (MODW (MOVBreg x) (MOVBreg y))
@@ -3801,10 +3803,10 @@ func rewriteValueS390X_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -3813,8 +3815,8 @@ func rewriteValueS390X_OpMod8_0(v *Value) bool {
func rewriteValueS390X_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (MODWU (MOVBZreg x) (MOVBZreg y))
@@ -3822,10 +3824,10 @@ func rewriteValueS390X_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -3834,8 +3836,8 @@ func rewriteValueS390X_OpMod8u_0(v *Value) bool {
func rewriteValueS390X_OpMove_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -3861,7 +3863,7 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpS390XMOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -3880,7 +3882,7 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpS390XMOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -3899,7 +3901,7 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpS390XMOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -3918,7 +3920,7 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpS390XMOVDstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -3938,14 +3940,14 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
v.reset(OpS390XMOVDstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -3966,22 +3968,22 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
v.reset(OpS390XMOVDstore)
v.AuxInt = 16
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
v0.AuxInt = 16
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -4003,14 +4005,14 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
v.reset(OpS390XMOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
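
Small Moves become the widest loads and stores that cover the length: [3] is a halfword copy at offset 0 chained under a byte copy at offset 2, and S390X is big-endian. A rough Go equivalent of the access pattern, with encoding/binary standing in for MOVHZload/MOVHstore:

package main

import (
	"encoding/binary"
	"fmt"
)

// move3 copies 3 bytes the way (Move [3] dst src mem) is lowered:
// an inner MOVHstore dst (MOVHZload src) chained under
// MOVBstore [2] dst (MOVBZload [2] src), one 2-byte and one 1-byte access.
func move3(dst, src []byte) {
	h := binary.BigEndian.Uint16(src[0:2])  // MOVHZload src
	binary.BigEndian.PutUint16(dst[0:2], h) // MOVHstore dst
	dst[2] = src[2]                         // MOVBstore [2] dst (MOVBZload [2] src)
}

func main() {
	src := []byte{0xAA, 0xBB, 0xCC}
	dst := make([]byte, 3)
	move3(dst, src)
	fmt.Printf("%x\n", dst) // aabbcc
}
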
@@ -4031,14 +4033,14 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
v.reset(OpS390XMOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -4059,14 +4061,14 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
v.reset(OpS390XMOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -4079,8 +4081,8 @@ func rewriteValueS390X_OpMove_0(v *Value) bool {
func rewriteValueS390X_OpMove_10(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [7] dst src mem)
// cond:
// result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
@@ -4094,22 +4096,22 @@ func rewriteValueS390X_OpMove_10(v *Value) bool {
v.reset(OpS390XMOVBstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -4151,7 +4153,7 @@ func rewriteValueS390X_OpMove_10(v *Value) bool {
v.AuxInt = makeValAndOff(s-256, 256)
v.AddArg(dst)
v.AddArg(src)
- v0 := b.NewValue0(v.Pos, OpS390XMVC, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
v0.AuxInt = makeValAndOff(256, 0)
v0.AddArg(dst)
v0.AddArg(src)
@@ -4174,11 +4176,11 @@ func rewriteValueS390X_OpMove_10(v *Value) bool {
v.AuxInt = makeValAndOff(s-512, 512)
v.AddArg(dst)
v.AddArg(src)
- v0 := b.NewValue0(v.Pos, OpS390XMVC, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
v0.AuxInt = makeValAndOff(256, 256)
v0.AddArg(dst)
v0.AddArg(src)
- v1 := b.NewValue0(v.Pos, OpS390XMVC, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
v1.AuxInt = makeValAndOff(256, 0)
v1.AddArg(dst)
v1.AddArg(src)
@@ -4202,15 +4204,15 @@ func rewriteValueS390X_OpMove_10(v *Value) bool {
v.AuxInt = makeValAndOff(s-768, 768)
v.AddArg(dst)
v.AddArg(src)
- v0 := b.NewValue0(v.Pos, OpS390XMVC, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
v0.AuxInt = makeValAndOff(256, 512)
v0.AddArg(dst)
v0.AddArg(src)
- v1 := b.NewValue0(v.Pos, OpS390XMVC, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
v1.AuxInt = makeValAndOff(256, 256)
v1.AddArg(dst)
v1.AddArg(src)
- v2 := b.NewValue0(v.Pos, OpS390XMVC, TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
v2.AuxInt = makeValAndOff(256, 0)
v2.AddArg(dst)
v2.AddArg(src)
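
A single MVC moves at most 256 bytes, so mid-sized Moves become a chain of MVCs: full 256-byte chunks at offsets 0, 256, 512, ..., with the outermost instruction covering the remainder, exactly the makeValAndOff(length, offset) pairs built above. The chunking, sketched in Go:

package main

import (
	"bytes"
	"fmt"
)

// moveMVC sketches the MVC chain: because the memory arguments nest,
// the lowest-offset MVC executes first, so a simple low-to-high loop
// reproduces the effect of the chained stores.
func moveMVC(dst, src []byte, s int) {
	for off := 0; off < s; off += 256 {
		n := s - off
		if n > 256 {
			n = 256 // one MVC moves at most 256 bytes
		}
		copy(dst[off:off+n], src[off:off+n]) // one MVC [makeValAndOff(n, off)]
	}
}

func main() {
	src := make([]byte, 700)
	for i := range src {
		src[i] = byte(i)
	}
	dst := make([]byte, 700)
	moveMVC(dst, src, 700) // 256@0, 256@256, 188@512 — cf. makeValAndOff(s-512, 512)
	fmt.Println(bytes.Equal(dst, src)) // true
}
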
@@ -4325,15 +4327,15 @@ func rewriteValueS390X_OpMul8_0(v *Value) bool {
func rewriteValueS390X_OpNeg16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neg16 x)
// cond:
// result: (NEGW (MOVHreg x))
for {
x := v.Args[0]
v.reset(OpS390XNEGW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -4386,15 +4388,15 @@ func rewriteValueS390X_OpNeg64F_0(v *Value) bool {
func rewriteValueS390X_OpNeg8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neg8 x)
// cond:
// result: (NEGW (MOVBreg x))
for {
x := v.Args[0]
v.reset(OpS390XNEGW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -4403,8 +4405,8 @@ func rewriteValueS390X_OpNeg8_0(v *Value) bool {
func rewriteValueS390X_OpNeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq16 x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
@@ -4412,17 +4414,17 @@ func rewriteValueS390X_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -4432,8 +4434,8 @@ func rewriteValueS390X_OpNeq16_0(v *Value) bool {
func rewriteValueS390X_OpNeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq32 x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
@@ -4441,13 +4443,13 @@ func rewriteValueS390X_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -4457,8 +4459,8 @@ func rewriteValueS390X_OpNeq32_0(v *Value) bool {
func rewriteValueS390X_OpNeq32F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq32F x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
@@ -4466,13 +4468,13 @@ func rewriteValueS390X_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -4482,8 +4484,8 @@ func rewriteValueS390X_OpNeq32F_0(v *Value) bool {
func rewriteValueS390X_OpNeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq64 x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
@@ -4491,13 +4493,13 @@ func rewriteValueS390X_OpNeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -4507,8 +4509,8 @@ func rewriteValueS390X_OpNeq64_0(v *Value) bool {
func rewriteValueS390X_OpNeq64F_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq64F x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
@@ -4516,13 +4518,13 @@ func rewriteValueS390X_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -4532,8 +4534,8 @@ func rewriteValueS390X_OpNeq64F_0(v *Value) bool {
func rewriteValueS390X_OpNeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq8 x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
@@ -4541,17 +4543,17 @@ func rewriteValueS390X_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -4561,8 +4563,8 @@ func rewriteValueS390X_OpNeq8_0(v *Value) bool {
func rewriteValueS390X_OpNeqB_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (NeqB x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
@@ -4570,17 +4572,17 @@ func rewriteValueS390X_OpNeqB_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
@@ -4590,8 +4592,8 @@ func rewriteValueS390X_OpNeqB_0(v *Value) bool {
func rewriteValueS390X_OpNeqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (NeqPtr x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
@@ -4599,13 +4601,13 @@ func rewriteValueS390X_OpNeqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v1.AuxInt = 1
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
v2.AddArg(x)
v2.AddArg(y)
v.AddArg(v2)
@@ -4640,8 +4642,8 @@ func rewriteValueS390X_OpNot_0(v *Value) bool {
func rewriteValueS390X_OpOffPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OffPtr [off] ptr:(SP))
// cond:
// result: (MOVDaddr [off] ptr)
@@ -4677,7 +4679,7 @@ func rewriteValueS390X_OpOffPtr_0(v *Value) bool {
off := v.AuxInt
ptr := v.Args[0]
v.reset(OpS390XADD)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)
@@ -4774,8 +4776,8 @@ func rewriteValueS390X_OpRound64F_0(v *Value) bool {
func rewriteValueS390X_OpRsh16Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux16 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [15])))
@@ -4785,15 +4787,15 @@ func rewriteValueS390X_OpRsh16Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 15
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -4804,8 +4806,8 @@ func rewriteValueS390X_OpRsh16Ux16_0(v *Value) bool {
func rewriteValueS390X_OpRsh16Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux32 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [15])))
@@ -4815,13 +4817,13 @@ func rewriteValueS390X_OpRsh16Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 15
v3.AddArg(y)
v2.AddArg(v3)
@@ -4832,8 +4834,8 @@ func rewriteValueS390X_OpRsh16Ux32_0(v *Value) bool {
func rewriteValueS390X_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [15])))
@@ -4843,13 +4845,13 @@ func rewriteValueS390X_OpRsh16Ux64_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v3.AuxInt = 15
v3.AddArg(y)
v2.AddArg(v3)
@@ -4860,8 +4862,8 @@ func rewriteValueS390X_OpRsh16Ux64_0(v *Value) bool {
func rewriteValueS390X_OpRsh16Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux8 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [15])))
@@ -4871,15 +4873,15 @@ func rewriteValueS390X_OpRsh16Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 15
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -4890,8 +4892,8 @@ func rewriteValueS390X_OpRsh16Ux8_0(v *Value) bool {
func rewriteValueS390X_OpRsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x16 <t> x y)
// cond:
// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [15])))))
@@ -4901,16 +4903,16 @@ func rewriteValueS390X_OpRsh16x16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v4.AuxInt = 15
- v5 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v3.AddArg(v4)
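
Signed right shifts cannot simply mask the result to zero: past the operand width the answer is 0 or -1 depending on the sign. The lowering instead saturates the count — ORW y (NOTW carrymask) leaves y alone while it is at most 15 and turns it into all-ones (63 after the hardware's mod-64) otherwise, so SRAW fills with the sign bit. The same trick, sketched in Go:

package main

import "fmt"

// rsh16x16 mirrors (Rsh16x16 <t> x y) ->
//   (SRAW <t> (MOVHreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVHZreg y) [15])))))
// carrymask is all-ones while y <= 15 and zero past that, so the
// OR with its complement saturates out-of-range counts to 63.
func rsh16x16(x int16, y uint16) int16 {
	var carrymask uint32
	if y <= 15 {
		carrymask = ^uint32(0) // SUBEWcarrymask (CMPWUconst (MOVHZreg y) [15])
	}
	count := (uint32(y) | ^carrymask) & 63 // ORW y (NOTW carrymask), count mod 64
	return int16(int32(x) >> count)        // SRAW (MOVHreg x)
}

func main() {
	fmt.Println(rsh16x16(-8, 2))   // -2
	fmt.Println(rsh16x16(-8, 200)) // -1: saturated count shifts in the sign bit
	fmt.Println(rsh16x16(8, 200))  // 0
}
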
@@ -4923,8 +4925,8 @@ func rewriteValueS390X_OpRsh16x16_0(v *Value) bool {
func rewriteValueS390X_OpRsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x32 <t> x y)
// cond:
// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [15])))))
@@ -4934,14 +4936,14 @@ func rewriteValueS390X_OpRsh16x32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v4.AuxInt = 15
v4.AddArg(y)
v3.AddArg(v4)
@@ -4954,8 +4956,8 @@ func rewriteValueS390X_OpRsh16x32_0(v *Value) bool {
func rewriteValueS390X_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 <t> x y)
// cond:
// result: (SRAW <t> (MOVHreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [15])))))
@@ -4965,14 +4967,14 @@ func rewriteValueS390X_OpRsh16x64_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XOR, y.Type)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XNOT, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, y.Type)
- v4 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v4.AuxInt = 15
v4.AddArg(y)
v3.AddArg(v4)
@@ -4985,8 +4987,8 @@ func rewriteValueS390X_OpRsh16x64_0(v *Value) bool {
func rewriteValueS390X_OpRsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x8 <t> x y)
// cond:
// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [15])))))
@@ -4996,16 +4998,16 @@ func rewriteValueS390X_OpRsh16x8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v4.AuxInt = 15
- v5 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v3.AddArg(v4)
@@ -5018,8 +5020,8 @@ func rewriteValueS390X_OpRsh16x8_0(v *Value) bool {
func rewriteValueS390X_OpRsh32Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux16 <t> x y)
// cond:
// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
@@ -5033,9 +5035,9 @@ func rewriteValueS390X_OpRsh32Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -5059,7 +5061,7 @@ func rewriteValueS390X_OpRsh32Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
v2.AddArg(y)
v1.AddArg(v2)
@@ -5083,7 +5085,7 @@ func rewriteValueS390X_OpRsh32Ux64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v2.AuxInt = 31
v2.AddArg(y)
v1.AddArg(v2)
@@ -5094,8 +5096,8 @@ func rewriteValueS390X_OpRsh32Ux64_0(v *Value) bool {
func rewriteValueS390X_OpRsh32Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux8 <t> x y)
// cond:
// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
@@ -5109,9 +5111,9 @@ func rewriteValueS390X_OpRsh32Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -5122,8 +5124,8 @@ func rewriteValueS390X_OpRsh32Ux8_0(v *Value) bool {
func rewriteValueS390X_OpRsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x16 <t> x y)
// cond:
// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [31])))))
@@ -5138,9 +5140,9 @@ func rewriteValueS390X_OpRsh32x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 31
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -5167,7 +5169,7 @@ func rewriteValueS390X_OpRsh32x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 31
v3.AddArg(y)
v2.AddArg(v3)
@@ -5194,7 +5196,7 @@ func rewriteValueS390X_OpRsh32x64_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpS390XNOT, y.Type)
v2 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v3.AuxInt = 31
v3.AddArg(y)
v2.AddArg(v3)
@@ -5207,8 +5209,8 @@ func rewriteValueS390X_OpRsh32x64_0(v *Value) bool {
func rewriteValueS390X_OpRsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x8 <t> x y)
// cond:
// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [31])))))
@@ -5223,9 +5225,9 @@ func rewriteValueS390X_OpRsh32x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 31
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -5238,8 +5240,8 @@ func rewriteValueS390X_OpRsh32x8_0(v *Value) bool {
func rewriteValueS390X_OpRsh64Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux16 <t> x y)
// cond:
// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
@@ -5253,9 +5255,9 @@ func rewriteValueS390X_OpRsh64Ux16_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 63
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -5279,7 +5281,7 @@ func rewriteValueS390X_OpRsh64Ux32_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 63
v2.AddArg(y)
v1.AddArg(v2)
@@ -5303,7 +5305,7 @@ func rewriteValueS390X_OpRsh64Ux64_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v2.AuxInt = 63
v2.AddArg(y)
v1.AddArg(v2)
@@ -5314,8 +5316,8 @@ func rewriteValueS390X_OpRsh64Ux64_0(v *Value) bool {
func rewriteValueS390X_OpRsh64Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux8 <t> x y)
// cond:
// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
@@ -5329,9 +5331,9 @@ func rewriteValueS390X_OpRsh64Ux8_0(v *Value) bool {
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
- v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v2.AuxInt = 63
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
@@ -5342,8 +5344,8 @@ func rewriteValueS390X_OpRsh64Ux8_0(v *Value) bool {
func rewriteValueS390X_OpRsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x16 <t> x y)
// cond:
// result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [63])))))
@@ -5358,9 +5360,9 @@ func rewriteValueS390X_OpRsh64x16_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 63
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -5387,7 +5389,7 @@ func rewriteValueS390X_OpRsh64x32_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 63
v3.AddArg(y)
v2.AddArg(v3)
@@ -5414,7 +5416,7 @@ func rewriteValueS390X_OpRsh64x64_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpS390XNOT, y.Type)
v2 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v3.AuxInt = 63
v3.AddArg(y)
v2.AddArg(v3)
@@ -5427,8 +5429,8 @@ func rewriteValueS390X_OpRsh64x64_0(v *Value) bool {
func rewriteValueS390X_OpRsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x8 <t> x y)
// cond:
// result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [63])))))
@@ -5443,9 +5445,9 @@ func rewriteValueS390X_OpRsh64x8_0(v *Value) bool {
v0.AddArg(y)
v1 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 63
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -5458,8 +5460,8 @@ func rewriteValueS390X_OpRsh64x8_0(v *Value) bool {
func rewriteValueS390X_OpRsh8Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux16 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [7])))
@@ -5469,15 +5471,15 @@ func rewriteValueS390X_OpRsh8Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 7
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -5488,8 +5490,8 @@ func rewriteValueS390X_OpRsh8Ux16_0(v *Value) bool {
func rewriteValueS390X_OpRsh8Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux32 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [7])))
@@ -5499,13 +5501,13 @@ func rewriteValueS390X_OpRsh8Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 7
v3.AddArg(y)
v2.AddArg(v3)
@@ -5516,8 +5518,8 @@ func rewriteValueS390X_OpRsh8Ux32_0(v *Value) bool {
func rewriteValueS390X_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [7])))
@@ -5527,13 +5529,13 @@ func rewriteValueS390X_OpRsh8Ux64_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v3.AuxInt = 7
v3.AddArg(y)
v2.AddArg(v3)
@@ -5544,8 +5546,8 @@ func rewriteValueS390X_OpRsh8Ux64_0(v *Value) bool {
func rewriteValueS390X_OpRsh8Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux8 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [7])))
@@ -5555,15 +5557,15 @@ func rewriteValueS390X_OpRsh8Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
- v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v3.AuxInt = 7
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
@@ -5574,8 +5576,8 @@ func rewriteValueS390X_OpRsh8Ux8_0(v *Value) bool {
func rewriteValueS390X_OpRsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x16 <t> x y)
// cond:
// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [7])))))
@@ -5585,16 +5587,16 @@ func rewriteValueS390X_OpRsh8x16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v4.AuxInt = 7
- v5 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v3.AddArg(v4)
@@ -5607,8 +5609,8 @@ func rewriteValueS390X_OpRsh8x16_0(v *Value) bool {
func rewriteValueS390X_OpRsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x32 <t> x y)
// cond:
// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [7])))))
@@ -5618,14 +5620,14 @@ func rewriteValueS390X_OpRsh8x32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v4.AuxInt = 7
v4.AddArg(y)
v3.AddArg(v4)
@@ -5638,8 +5640,8 @@ func rewriteValueS390X_OpRsh8x32_0(v *Value) bool {
func rewriteValueS390X_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x64 <t> x y)
// cond:
// result: (SRAW <t> (MOVBreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [7])))))
@@ -5649,14 +5651,14 @@ func rewriteValueS390X_OpRsh8x64_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XOR, y.Type)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XNOT, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, y.Type)
- v4 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v4.AuxInt = 7
v4.AddArg(y)
v3.AddArg(v4)
@@ -5669,8 +5671,8 @@ func rewriteValueS390X_OpRsh8x64_0(v *Value) bool {
func rewriteValueS390X_OpRsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x8 <t> x y)
// cond:
// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [7])))))
@@ -5680,16 +5682,16 @@ func rewriteValueS390X_OpRsh8x8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v1.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XNOTW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
- v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v4.AuxInt = 7
- v5 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
v5.AddArg(y)
v4.AddArg(v5)
v3.AddArg(v4)
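
Note that the signed byte shifts first pass x through MOVBreg <typ.Int64>: the operand must be sign-extended before SRAW for Go's int8 shift semantics to hold. The equivalence the rule depends on, checked in plain Go:

package main

import "fmt"

func main() {
	for _, x := range []int8{-0x40, -1, 0x3f} {
		// Sign-extending, shifting wide, then truncating agrees with
		// the narrow arithmetic shift itself.
		fmt.Println(x>>3 == int8(int64(x)>>3)) // always true
	}
}
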
@@ -7184,7 +7186,7 @@ func rewriteValueS390X_OpS390XCMP_0(v *Value) bool {
break
}
v.reset(OpS390XInvertFlags)
- v0 := b.NewValue0(v.Pos, OpS390XCMPconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v.AddArg(v0)
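
These InvertFlags rewrites canonicalize (CMP (MOVDconst [c]) x) so the constant ends up in the immediate slot of CMPconst; swapping a comparison's operands simply mirrors the resulting flags. The identity, sketched with a -1/0/+1 stand-in for the condition code:

package main

import "fmt"

// cmp is a -1/0/+1 stand-in for the condition code CMP sets.
func cmp(a, b int64) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	}
	return 0
}

func main() {
	c, x := int64(7), int64(42)
	// CMP c x == InvertFlags (CMPconst x [c]): swapping the operands
	// exchanges "less" and "greater" and leaves "equal" alone.
	fmt.Println(cmp(c, x) == -cmp(x, c)) // true for any c, x
}
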
@@ -7227,7 +7229,7 @@ func rewriteValueS390X_OpS390XCMPU_0(v *Value) bool {
break
}
v.reset(OpS390XInvertFlags)
- v0 := b.NewValue0(v.Pos, OpS390XCMPUconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
v0.AuxInt = int64(uint32(c))
v0.AddArg(x)
v.AddArg(v0)
@@ -7315,7 +7317,7 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpS390XInvertFlags)
- v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, types.TypeFlags)
v0.AuxInt = c
v0.AddArg(x)
v.AddArg(v0)
@@ -7352,7 +7354,7 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool {
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpS390XInvertFlags)
- v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
v0.AuxInt = int64(uint32(c))
v0.AddArg(x)
v.AddArg(v0)
@@ -12068,12 +12070,12 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool {
b := v.Block
_ = b
// match: (MOVDnop <t> x)
- // cond: t.Compare(x.Type) == CMPeq
+ // cond: t.Compare(x.Type) == types.CMPeq
// result: x
for {
t := v.Type
x := v.Args[0]
- if !(t.Compare(x.Type) == CMPeq) {
+ if !(t.Compare(x.Type) == types.CMPeq) {
break
}
v.reset(OpCopy)
@@ -12399,12 +12401,12 @@ func rewriteValueS390X_OpS390XMOVDreg_0(v *Value) bool {
b := v.Block
_ = b
// match: (MOVDreg <t> x)
- // cond: t.Compare(x.Type) == CMPeq
+ // cond: t.Compare(x.Type) == types.CMPeq
// result: x
for {
t := v.Type
x := v.Args[0]
- if !(t.Compare(x.Type) == CMPeq) {
+ if !(t.Compare(x.Type) == types.CMPeq) {
break
}
v.reset(OpCopy)
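
The condition now spells out types.CMPeq: Compare is a three-way order on types, and the no-op register move can only be dropped when the two types compare equal. A toy version of that tri-valued result (illustrative only; the real Compare orders *types.Type values, not strings):

package main

import "fmt"

type cmp int8 // stand-in for the compare result type

const (
	cmpLt cmp = -1 // types.CMPlt
	cmpEq cmp = 0  // types.CMPeq
	cmpGt cmp = 1  // types.CMPgt
)

// compare is a toy total order standing in for (*types.Type).Compare.
func compare(a, b string) cmp {
	switch {
	case a < b:
		return cmpLt
	case a > b:
		return cmpGt
	}
	return cmpEq
}

func main() {
	fmt.Println(compare("int64", "int64") == cmpEq)  // true: drop the move
	fmt.Println(compare("int64", "uint64") == cmpEq) // false: keep it
}
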
@@ -15017,8 +15019,8 @@ func rewriteValueS390X_OpS390XMOVHstore_10(v *Value) bool {
func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem)
// cond: isU12Bit(ValAndOff(sc).Off()+off)
// result: (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
@@ -15092,7 +15094,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool {
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
v.AddArg(p)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = int64(int32(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16))
v.AddArg(v0)
v.AddArg(mem)
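
The word built here, c&0xffff | a<<16, is how two adjacent halfword constant stores collapse into one word store: on big-endian s390x the halfword at the lower offset (a) must land in the high half of the merged word. A plain-Go check of the packing, with hypothetical constants:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	a, c := uint16(0x1234), uint16(0xBEEF) // hypothetical store constants

	var two [4]byte // separate halfword stores at offsets 0 and 2
	binary.BigEndian.PutUint16(two[0:], a)
	binary.BigEndian.PutUint16(two[2:], c)

	var one [4]byte // the single merged word store
	binary.BigEndian.PutUint32(one[:], uint32(c)&0xffff|uint32(a)<<16)

	fmt.Println(bytes.Equal(two[:], one[:])) // true
}
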
@@ -17495,8 +17497,8 @@ func rewriteValueS390X_OpS390XMOVWstore_10(v *Value) bool {
func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem)
// cond: isU12Bit(ValAndOff(sc).Off()+off)
// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
@@ -17570,7 +17572,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool {
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
v.AddArg(p)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32
v.AddArg(v0)
v.AddArg(mem)
@@ -18731,8 +18733,8 @@ func rewriteValueS390X_OpS390XNEGW_0(v *Value) bool {
func rewriteValueS390X_OpS390XNOT_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (NOT x)
// cond: true
// result: (XOR (MOVDconst [-1]) x)
@@ -18742,7 +18744,7 @@ func rewriteValueS390X_OpS390XNOT_0(v *Value) bool {
break
}
v.reset(OpS390XXOR)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
v0.AuxInt = -1
v.AddArg(v0)
v.AddArg(x)
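
Lowering (NOT x) to (XOR (MOVDconst [-1]) x) is just the two's-complement identity, easily checked in Go:

package main

import "fmt"

func main() {
	x := int64(0x0F0F)
	fmt.Println(^x == x^-1) // true: NOT is XOR with all ones
}
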
@@ -18987,8 +18989,8 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool {
func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR <t> x g:(MOVDload [off] {sym} ptr mem))
// cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)
// result: (ORload <t> [off] {sym} x ptr mem)
@@ -19052,7 +19054,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
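
The MOVHZload rewrites in this stretch fuse an OR of (SLDconst [8] <byte load at i0>) with the byte load at i0+1 into a single halfword load; the shifted OR of adjacent bytes is exactly big-endian order, which is what one wide load yields on s390x:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	p := []byte{0xDE, 0xAD}
	or := uint16(p[0])<<8 | uint16(p[1])          // the OR/SLDconst tree
	fmt.Println(or == binary.BigEndian.Uint16(p)) // true: one MOVHZload
}
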
@@ -19098,7 +19100,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -19144,7 +19146,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -19190,7 +19192,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -19236,7 +19238,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -19282,7 +19284,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -19341,7 +19343,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -19401,7 +19403,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -19461,7 +19463,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -19476,8 +19478,8 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool {
func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
@@ -19528,7 +19530,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -19588,7 +19590,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -19648,7 +19650,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -19708,7 +19710,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -19768,7 +19770,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -19819,7 +19821,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -19870,7 +19872,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -19921,7 +19923,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -19972,7 +19974,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20023,7 +20025,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20038,8 +20040,8 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool {
func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} p idx mem))
// cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem)
@@ -20081,7 +20083,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20132,7 +20134,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20183,7 +20185,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20234,7 +20236,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20285,7 +20287,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20336,7 +20338,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20387,7 +20389,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20438,7 +20440,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20489,7 +20491,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20540,7 +20542,7 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20555,8 +20557,8 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool {
func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} idx p mem))
// cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem)
@@ -20598,7 +20600,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20649,7 +20651,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20700,7 +20702,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20751,7 +20753,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20802,7 +20804,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20853,7 +20855,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20904,7 +20906,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -20955,7 +20957,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -21006,7 +21008,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -21070,7 +21072,7 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21086,8 +21088,8 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool {
func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZloadidx [i0] {s} p idx mem)) y)
@@ -21142,7 +21144,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21207,7 +21209,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21272,7 +21274,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21337,7 +21339,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21402,7 +21404,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21467,7 +21469,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21532,7 +21534,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21597,7 +21599,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21662,7 +21664,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21727,7 +21729,7 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21743,8 +21745,8 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool {
func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZloadidx [i0] {s} p idx mem)) y)
@@ -21799,7 +21801,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21864,7 +21866,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21929,7 +21931,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -21994,7 +21996,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22059,7 +22061,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22124,7 +22126,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22189,7 +22191,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22254,7 +22256,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22319,7 +22321,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22384,7 +22386,7 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22400,8 +22402,8 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool {
func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZloadidx [i0] {s} p idx mem)) y)
@@ -22456,7 +22458,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22521,7 +22523,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22586,7 +22588,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22651,7 +22653,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22716,7 +22718,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22781,7 +22783,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22846,7 +22848,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22911,7 +22913,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -22976,7 +22978,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -23041,7 +23043,7 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -23057,8 +23059,8 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool {
func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZloadidx [i0] {s} p idx mem)) y)
@@ -23113,7 +23115,7 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -23161,10 +23163,10 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
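
The MOVHBRload ("byte-reversed") cases are the mirror image: an OR tree that assembles adjacent bytes in little-endian order becomes a single byte-reversed load, zero-extended through MOVHZreg. The byte-order fact it encodes:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	p := []byte{0xDE, 0xAD}
	or := uint16(p[0]) | uint16(p[1])<<8             // little-endian OR tree
	fmt.Println(or == binary.LittleEndian.Uint16(p)) // true: one MOVHBRload
}
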
@@ -23209,10 +23211,10 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -23265,10 +23267,10 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -23321,10 +23323,10 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -23377,7 +23379,7 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23431,7 +23433,7 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -23490,8 +23492,8 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -23552,8 +23554,8 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -23614,8 +23616,8 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -23631,8 +23633,8 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool {
func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)))
// cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
@@ -23683,8 +23685,8 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -23753,8 +23755,8 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -23823,8 +23825,8 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -23893,8 +23895,8 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -23963,8 +23965,8 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -24016,10 +24018,10 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24069,10 +24071,10 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24122,10 +24124,10 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24175,10 +24177,10 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24228,10 +24230,10 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24245,8 +24247,8 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool {
func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} p idx mem))
// cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))
@@ -24288,10 +24290,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24341,10 +24343,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24394,10 +24396,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24455,10 +24457,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24516,10 +24518,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24577,10 +24579,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24638,10 +24640,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24699,10 +24701,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24760,10 +24762,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24821,10 +24823,10 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24838,8 +24840,8 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool {
func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))
@@ -24889,10 +24891,10 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -24950,7 +24952,7 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -25009,7 +25011,7 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -25068,7 +25070,7 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -25127,7 +25129,7 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -25186,7 +25188,7 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -25245,7 +25247,7 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -25304,7 +25306,7 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -25363,7 +25365,7 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -25427,8 +25429,8 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -25445,8 +25447,8 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool {
func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y))
// cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y)
@@ -25501,8 +25503,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -25568,8 +25570,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -25635,8 +25637,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -25702,8 +25704,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -25769,8 +25771,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -25836,8 +25838,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -25903,8 +25905,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -25970,8 +25972,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26037,8 +26039,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26104,8 +26106,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26122,8 +26124,8 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool {
func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))
// cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y)
@@ -26178,8 +26180,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26245,8 +26247,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26312,8 +26314,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26379,8 +26381,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26446,8 +26448,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26521,8 +26523,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26596,8 +26598,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26671,8 +26673,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26746,8 +26748,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26821,8 +26823,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26839,8 +26841,8 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool {
func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)))))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y)
@@ -26903,8 +26905,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -26978,8 +26980,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27053,8 +27055,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27128,8 +27130,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27203,8 +27205,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27278,8 +27280,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27353,8 +27355,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27428,8 +27430,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27503,8 +27505,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27578,8 +27580,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27596,8 +27598,8 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool {
func rewriteValueS390X_OpS390XOR_150(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y)
@@ -27660,8 +27662,8 @@ func rewriteValueS390X_OpS390XOR_150(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -27906,8 +27908,8 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool {
func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW <t> g:(MOVWZload [off] {sym} ptr mem) x)
// cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)
// result: (ORWload <t> [off] {sym} x ptr mem)
@@ -28023,7 +28025,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28069,7 +28071,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28115,7 +28117,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28161,7 +28163,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28220,7 +28222,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -28280,7 +28282,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -28340,7 +28342,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -28355,8 +28357,8 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool {
func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))) s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
@@ -28407,7 +28409,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -28458,7 +28460,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28509,7 +28511,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28560,7 +28562,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28611,7 +28613,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28662,7 +28664,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28713,7 +28715,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28764,7 +28766,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28815,7 +28817,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28866,7 +28868,7 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28881,8 +28883,8 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool {
func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)))
// cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem)
@@ -28924,7 +28926,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -28975,7 +28977,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -29026,7 +29028,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -29077,7 +29079,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -29128,7 +29130,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -29179,7 +29181,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -29230,7 +29232,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -29294,7 +29296,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29359,7 +29361,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29424,7 +29426,7 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29440,8 +29442,8 @@ func rewriteValueS390X_OpS390XORW_30(v *Value) bool {
func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZloadidx [i0] {s} p idx mem)) y)
@@ -29496,7 +29498,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29561,7 +29563,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29626,7 +29628,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29691,7 +29693,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29756,7 +29758,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29821,7 +29823,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29886,7 +29888,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -29951,7 +29953,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -30016,7 +30018,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -30081,7 +30083,7 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -30097,8 +30099,8 @@ func rewriteValueS390X_OpS390XORW_40(v *Value) bool {
func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZloadidx [i0] {s} p idx mem)) y)
@@ -30153,7 +30155,7 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -30218,7 +30220,7 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -30283,7 +30285,7 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j1
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, typ.UInt16)
v2.AuxInt = i0
v2.Aux = s
v2.AddArg(p)
@@ -30331,10 +30333,10 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -30379,10 +30381,10 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -30435,7 +30437,7 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -30489,7 +30491,7 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -30548,8 +30550,8 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30610,8 +30612,8 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30672,8 +30674,8 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30689,8 +30691,8 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool {
func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))) s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)))
// cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
@@ -30741,8 +30743,8 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -30794,10 +30796,10 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -30847,10 +30849,10 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -30900,10 +30902,10 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -30953,10 +30955,10 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -31006,10 +31008,10 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -31059,10 +31061,10 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -31112,10 +31114,10 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -31165,10 +31167,10 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v1.AuxInt = i0
v1.Aux = s
v1.AddArg(p)
@@ -31226,7 +31228,7 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -31241,8 +31243,8 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool {
func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem)
@@ -31292,7 +31294,7 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -31351,7 +31353,7 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -31410,7 +31412,7 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -31469,7 +31471,7 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -31528,7 +31530,7 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -31587,7 +31589,7 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -31646,7 +31648,7 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i0
@@ -31710,8 +31712,8 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -31777,8 +31779,8 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -31844,8 +31846,8 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -31862,8 +31864,8 @@ func rewriteValueS390X_OpS390XORW_70(v *Value) bool {
func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y))
// cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y)
@@ -31918,8 +31920,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -31985,8 +31987,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32052,8 +32054,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32119,8 +32121,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32186,8 +32188,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32253,8 +32255,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32320,8 +32322,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32387,8 +32389,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32454,8 +32456,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32521,8 +32523,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32539,8 +32541,8 @@ func rewriteValueS390X_OpS390XORW_80(v *Value) bool {
func rewriteValueS390X_OpS390XORW_90(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))
// cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y)
@@ -32595,8 +32597,8 @@ func rewriteValueS390X_OpS390XORW_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32662,8 +32664,8 @@ func rewriteValueS390X_OpS390XORW_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
@@ -32729,8 +32731,8 @@ func rewriteValueS390X_OpS390XORW_90(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type)
v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
v3.AuxInt = i0
v3.Aux = s
v3.AddArg(p)
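
The ORW rules above all apply one load-merging idea: when two adjacent single-byte loads feed an OR through matching shift constants, and every intermediate value has exactly one use (the Uses == 1 and clobber conditions), the pair collapses into a single wider load at the merge point of the two loads. A source-level illustration of the pattern these rules recognize (a sketch, not code from this CL):

// load16le assembles a 16-bit value from two adjacent bytes in
// little-endian order. s390x is big-endian, so the rewrites above
// replace the two MOVBZload ops with one byte-reversed halfword load
// (MOVHBRload, typed typ.UInt16), zero-extended by MOVHZreg
// (typ.UInt64).
func load16le(b []byte, i int) uint16 {
	return uint16(b[i]) | uint16(b[i+1])<<8
}
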
@@ -34335,14 +34337,14 @@ func rewriteValueS390X_OpStaticCall_0(v *Value) bool {
}
func rewriteValueS390X_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpS390XFMOVDstore)
@@ -34352,14 +34354,14 @@ func rewriteValueS390X_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (FMOVSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpS390XFMOVSstore)
@@ -34369,14 +34371,14 @@ func rewriteValueS390X_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8
+ // cond: t.(*types.Type).Size() == 8
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8) {
+ if !(t.(*types.Type).Size() == 8) {
break
}
v.reset(OpS390XMOVDstore)
@@ -34386,14 +34388,14 @@ func rewriteValueS390X_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4
+ // cond: t.(*types.Type).Size() == 4
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4) {
+ if !(t.(*types.Type).Size() == 4) {
break
}
v.reset(OpS390XMOVWstore)
@@ -34403,14 +34405,14 @@ func rewriteValueS390X_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(OpS390XMOVHstore)
@@ -34420,14 +34422,14 @@ func rewriteValueS390X_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(OpS390XMOVBstore)
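
Each Store case above has the same shape: the store's Aux carries the stored type, now asserted to the concrete *types.Type so that Size() is a direct method call rather than a call through the old Type interface. Condensed into one dispatch (a sketch of the generated cases above, minus the float cases; assumes the surrounding ssa package):

// Condensed sketch of the generated Store lowering's width dispatch.
func lowerStoreSketch(v *Value) bool {
	t := v.Aux.(*types.Type) // previously v.Aux.(Type) via the ssa.Type interface
	ptr, val, mem := v.Args[0], v.Args[1], v.Args[2]
	var op Op
	switch t.Size() {
	case 8:
		op = OpS390XMOVDstore
	case 4:
		op = OpS390XMOVWstore
	case 2:
		op = OpS390XMOVHstore
	case 1:
		op = OpS390XMOVBstore
	default:
		return false
	}
	v.reset(op)
	v.AddArg(ptr)
	v.AddArg(val)
	v.AddArg(mem)
	return true
}
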
@@ -34741,7 +34743,7 @@ func rewriteValueS390X_OpZero_0(v *Value) bool {
v.reset(OpS390XMOVBstoreconst)
v.AuxInt = makeValAndOff(0, 2)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -34760,7 +34762,7 @@ func rewriteValueS390X_OpZero_0(v *Value) bool {
v.reset(OpS390XMOVBstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -34779,7 +34781,7 @@ func rewriteValueS390X_OpZero_0(v *Value) bool {
v.reset(OpS390XMOVHstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
@@ -34798,7 +34800,7 @@ func rewriteValueS390X_OpZero_0(v *Value) bool {
v.reset(OpS390XMOVWstoreconst)
v.AuxInt = makeValAndOff(0, 3)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
v0.AuxInt = 0
v0.AddArg(destptr)
v0.AddArg(mem)
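
The Zero rules above chain store-constant ops, and every value that produces memory is now typed with the types.TypeMem singleton from package types rather than ssa's old TypeMem. Mirroring the 7-byte case directly above (a sketch with a hypothetical wrapper; assumes the ssa package internals):

// Zeroing 7 bytes: an outer 4-byte store-constant at offset 3 chained
// onto an inner one at offset 0; the inner store is typed
// types.TypeMem because it produces the memory the outer store uses.
func zero7Sketch(b *Block, v *Value, destptr, mem *Value) {
	v.reset(OpS390XMOVWstoreconst)
	v.AuxInt = makeValAndOff(0, 3)
	v.AddArg(destptr)
	v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
	v0.AuxInt = 0
	v0.AddArg(destptr)
	v0.AddArg(mem)
	v.AddArg(v0)
}
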
@@ -34919,8 +34921,8 @@ func rewriteBlockS390X(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockS390XEQ:
// match: (EQ (InvertFlags cmp) yes no)
@@ -35296,15 +35298,15 @@ func rewriteBlockS390X(b *Block) bool {
}
// match: (If cond yes no)
// cond:
- // result: (NE (CMPWconst [0] (MOVBZreg <types.Bool> cond)) yes no)
+ // result: (NE (CMPWconst [0] (MOVBZreg <typ.Bool> cond)) yes no)
for {
v := b.Control
_ = v
cond := b.Control
b.Kind = BlockS390XNE
- v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, types.TypeFlags)
v0.AuxInt = 0
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, types.Bool)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.Bool)
v1.AddArg(cond)
v0.AddArg(v1)
b.SetControl(v0)
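
Every rewrite function in this file now opens by binding typ to the table of frequently used types cached on the SSA config. An abbreviated sketch of that table, reconstructed from the fields used above (assumes the shape of ssa.Config in this CL; the full struct caches more types):

// Cached *types.Type values that generated rewrite code reaches
// through typ := &b.Func.Config.Types.
type Types struct {
	Bool       *types.Type
	Int16      *types.Type
	Int32      *types.Type
	UInt16     *types.Type
	UInt32     *types.Type
	UInt64     *types.Type
	Float32    *types.Type
	Float64    *types.Type
	Int        *types.Type
	IntPtr     *types.Type
	BytePtr    *types.Type
	Float32Ptr *types.Type
	// ... further cached types elided
}
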
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go
index 7040abbec0..710170cec8 100644
--- a/src/cmd/compile/internal/ssa/rewritedec.go
+++ b/src/cmd/compile/internal/ssa/rewritedec.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValuedec(v *Value) bool {
switch v.Op {
@@ -113,11 +115,11 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 8
- // result: (ComplexMake (Load <types.Float32> ptr mem) (Load <types.Float32> (OffPtr <types.Float32Ptr> [4] ptr) mem) )
+ // result: (ComplexMake (Load <typ.Float32> ptr mem) (Load <typ.Float32> (OffPtr <typ.Float32Ptr> [4] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
@@ -126,12 +128,12 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
break
}
v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpLoad, types.Float32)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, types.Float32)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.Float32Ptr)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -141,7 +143,7 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 16
- // result: (ComplexMake (Load <types.Float64> ptr mem) (Load <types.Float64> (OffPtr <types.Float64Ptr> [8] ptr) mem) )
+ // result: (ComplexMake (Load <typ.Float64> ptr mem) (Load <typ.Float64> (OffPtr <typ.Float64Ptr> [8] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
@@ -150,12 +152,12 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
break
}
v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpLoad, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, types.Float64)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.Float64Ptr)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
v2.AuxInt = 8
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -165,7 +167,7 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsString()
- // result: (StringMake (Load <types.BytePtr> ptr mem) (Load <types.Int> (OffPtr <types.IntPtr> [config.PtrSize] ptr) mem))
+ // result: (StringMake (Load <typ.BytePtr> ptr mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -174,12 +176,12 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpLoad, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, types.Int)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -189,7 +191,7 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsSlice()
- // result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <types.Int> (OffPtr <types.IntPtr> [config.PtrSize] ptr) mem) (Load <types.Int> (OffPtr <types.IntPtr> [2*config.PtrSize] ptr) mem))
+ // result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -202,15 +204,15 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, types.Int)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpLoad, types.Int)
- v4 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
+ v3 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v4.AuxInt = 2 * config.PtrSize
v4.AddArg(ptr)
v3.AddArg(v4)
@@ -220,7 +222,7 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: t.IsInterface()
- // result: (IMake (Load <types.BytePtr> ptr mem) (Load <types.BytePtr> (OffPtr <types.BytePtrPtr> [config.PtrSize] ptr) mem))
+ // result: (IMake (Load <typ.BytePtr> ptr mem) (Load <typ.BytePtr> (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -229,12 +231,12 @@ func rewriteValuedec_OpLoad_0(v *Value) bool {
break
}
v.reset(OpIMake)
- v0 := b.NewValue0(v.Pos, OpLoad, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, types.BytePtr)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.BytePtrPtr)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
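
The Load rules above decompose a load of a composite kind into loads of its machine-level words: complex into two floats, string into pointer and length, slice into pointer, length, and capacity, interface into two pointers. The t.IsString() case mirrors the two-word string header (an illustrative sketch; the type and field names are not from this CL):

import "unsafe"

// Layout behind the string rule above: the data pointer is loaded as
// typ.BytePtr at offset 0 and the length as typ.Int at
// OffPtr [config.PtrSize].
type stringHeader struct {
	data unsafe.Pointer
	len  int
}
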
@@ -300,11 +302,11 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Store {t} dst (ComplexMake real imag) mem)
- // cond: t.(Type).Size() == 8
- // result: (Store {types.Float32} (OffPtr <types.Float32Ptr> [4] dst) imag (Store {types.Float32} dst real mem))
+ // cond: t.(*types.Type).Size() == 8
+ // result: (Store {typ.Float32} (OffPtr <typ.Float32Ptr> [4] dst) imag (Store {typ.Float32} dst real mem))
for {
t := v.Aux
dst := v.Args[0]
@@ -315,18 +317,18 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
real := v_1.Args[0]
imag := v_1.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8) {
+ if !(t.(*types.Type).Size() == 8) {
break
}
v.reset(OpStore)
- v.Aux = types.Float32
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.Float32Ptr)
+ v.Aux = typ.Float32
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
v0.AuxInt = 4
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = types.Float32
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typ.Float32
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
@@ -334,8 +336,8 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} dst (ComplexMake real imag) mem)
- // cond: t.(Type).Size() == 16
- // result: (Store {types.Float64} (OffPtr <types.Float64Ptr> [8] dst) imag (Store {types.Float64} dst real mem))
+ // cond: t.(*types.Type).Size() == 16
+ // result: (Store {typ.Float64} (OffPtr <typ.Float64Ptr> [8] dst) imag (Store {typ.Float64} dst real mem))
for {
t := v.Aux
dst := v.Args[0]
@@ -346,18 +348,18 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
real := v_1.Args[0]
imag := v_1.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 16) {
+ if !(t.(*types.Type).Size() == 16) {
break
}
v.reset(OpStore)
- v.Aux = types.Float64
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.Float64Ptr)
+ v.Aux = typ.Float64
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
v0.AuxInt = 8
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = types.Float64
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typ.Float64
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
@@ -366,7 +368,7 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
}
// match: (Store dst (StringMake ptr len) mem)
// cond:
- // result: (Store {types.Int} (OffPtr <types.IntPtr> [config.PtrSize] dst) len (Store {types.BytePtr} dst ptr mem))
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem))
for {
dst := v.Args[0]
v_1 := v.Args[1]
@@ -377,14 +379,14 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
len := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
- v.Aux = types.Int
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
+ v.Aux = typ.Int
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(len)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = types.BytePtr
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typ.BytePtr
v1.AddArg(dst)
v1.AddArg(ptr)
v1.AddArg(mem)
@@ -393,7 +395,7 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
}
// match: (Store dst (SliceMake ptr len cap) mem)
// cond:
- // result: (Store {types.Int} (OffPtr <types.IntPtr> [2*config.PtrSize] dst) cap (Store {types.Int} (OffPtr <types.IntPtr> [config.PtrSize] dst) len (Store {types.BytePtr} dst ptr mem)))
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem)))
for {
dst := v.Args[0]
v_1 := v.Args[1]
@@ -405,21 +407,21 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
cap := v_1.Args[2]
mem := v.Args[2]
v.reset(OpStore)
- v.Aux = types.Int
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
+ v.Aux = typ.Int
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v0.AuxInt = 2 * config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(cap)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = types.Int
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.IntPtr)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typ.Int
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(len)
- v3 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v3.Aux = types.BytePtr
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typ.BytePtr
v3.AddArg(dst)
v3.AddArg(ptr)
v3.AddArg(mem)
@@ -429,7 +431,7 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
}
// match: (Store dst (IMake itab data) mem)
// cond:
- // result: (Store {types.BytePtr} (OffPtr <types.BytePtrPtr> [config.PtrSize] dst) data (Store {types.Uintptr} dst itab mem))
+ // result: (Store {typ.BytePtr} (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst) data (Store {typ.Uintptr} dst itab mem))
for {
dst := v.Args[0]
v_1 := v.Args[1]
@@ -440,14 +442,14 @@ func rewriteValuedec_OpStore_0(v *Value) bool {
data := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
- v.Aux = types.BytePtr
- v0 := b.NewValue0(v.Pos, OpOffPtr, types.BytePtrPtr)
+ v.Aux = typ.BytePtr
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(data)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = types.Uintptr
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typ.Uintptr
v1.AddArg(dst)
v1.AddArg(itab)
v1.AddArg(mem)
@@ -495,8 +497,8 @@ func rewriteBlockdec(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
}
return false
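
A minimal, self-contained sketch (illustrative only, not part of this patch) of the shape every generated rewrite function above shares: match an op tree, check the cond, then rebuild the Value in place. The names toyValue, toyOp, and rewriteStoreString are hypothetical stand-ins for ssa.Value, ssa.Op, and the generated rewriteValuedec_OpStore_0; the real functions also thread type singletons through b.Func.Config.Types, which this toy elides.

package main

import "fmt"

type toyOp int

const (
	opStore toyOp = iota
	opStringMake
	opOffPtr
)

// toyValue stands in for ssa.Value: an op, its arguments, and the Aux
// field that a Store uses to carry the store's *types.Type.
type toyValue struct {
	op   toyOp
	aux  string
	args []*toyValue
}

// rewriteStoreString mirrors the generated rule
//   match:  (Store dst (StringMake ptr len) mem)
//   result: (Store {typ.Int} (OffPtr [PtrSize] dst) len
//                  (Store {typ.BytePtr} dst ptr mem))
// It returns true and rewrites v in place when the pattern matches.
func rewriteStoreString(v *toyValue) bool {
	if v.op != opStore || len(v.args) != 3 {
		return false
	}
	v1 := v.args[1]
	if v1.op != opStringMake {
		return false
	}
	dst, ptr, length, mem := v.args[0], v1.args[0], v1.args[1], v.args[2]
	// Build the decomposed tree: store the length past the pointer word,
	// chained through an inner store of the data pointer itself.
	off := &toyValue{op: opOffPtr, args: []*toyValue{dst}}
	inner := &toyValue{op: opStore, aux: "BytePtr", args: []*toyValue{dst, ptr, mem}}
	*v = toyValue{op: opStore, aux: "Int", args: []*toyValue{off, length, inner}}
	return true
}

func main() {
	str := &toyValue{op: opStringMake, args: []*toyValue{{}, {}}}
	v := &toyValue{op: opStore, args: []*toyValue{{}, str, {}}}
	fmt.Println(rewriteStoreString(v)) // true; v now holds the two-store decomposition
}
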
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
index 0fe0d2197e..c0f54597f3 100644
--- a/src/cmd/compile/internal/ssa/rewritedec64.go
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
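
The blank-identifier assignments above exist so the generator can emit every import unconditionally, whether or not the rules in a given file use it. A standalone illustration of the idiom (an assumed minimal example, not part of the patch):

package main

import (
	"fmt"
	"math"
)

// Referencing math through the blank identifier keeps the import legal
// even in a generated file whose rules happen not to use the package.
var _ = math.MinInt8 // in case not otherwise used

func main() {
	fmt.Println("compiles whether or not any rule below references math")
}
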
func rewriteValuedec64(v *Value) bool {
switch v.Op {
@@ -133,39 +135,39 @@ func rewriteValuedec64(v *Value) bool {
func rewriteValuedec64_OpAdd64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Add64 x y)
// cond:
- // result: (Int64Make (Add32withcarry <types.Int32> (Int64Hi x) (Int64Hi y) (Select1 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 <types.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
+ // result: (Int64Make (Add32withcarry <typ.Int32> (Int64Hi x) (Int64Hi y) (Select1 <types.TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 <typ.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpAdd32withcarry, types.Int32)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
- v4 := b.NewValue0(v.Pos, OpAdd32carry, MakeTuple(types.UInt32, TypeFlags))
- v5 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
v0.AddArg(v3)
v.AddArg(v0)
- v7 := b.NewValue0(v.Pos, OpSelect0, types.UInt32)
- v8 := b.NewValue0(v.Pos, OpAdd32carry, MakeTuple(types.UInt32, TypeFlags))
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(x)
v8.AddArg(v9)
- v10 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v10.AddArg(y)
v8.AddArg(v10)
v7.AddArg(v8)
@@ -176,28 +178,28 @@ func rewriteValuedec64_OpAdd64_0(v *Value) bool {
func rewriteValuedec64_OpAnd64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (And64 x y)
// cond:
- // result: (Int64Make (And32 <types.UInt32> (Int64Hi x) (Int64Hi y)) (And32 <types.UInt32> (Int64Lo x) (Int64Lo y)))
+ // result: (Int64Make (And32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (And32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpAnd32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAnd32, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
@@ -209,11 +211,11 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned()
- // result: (Int64Make (Arg <types.Int32> {n} [off+4]) (Arg <types.UInt32> {n} [off]))
+ // result: (Int64Make (Arg <typ.Int32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
for {
off := v.AuxInt
n := v.Aux
@@ -221,11 +223,11 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpArg, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Int32)
v0.AuxInt = off + 4
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
v1.AuxInt = off
v1.Aux = n
v.AddArg(v1)
@@ -233,7 +235,7 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned()
- // result: (Int64Make (Arg <types.UInt32> {n} [off+4]) (Arg <types.UInt32> {n} [off]))
+ // result: (Int64Make (Arg <typ.UInt32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
for {
off := v.AuxInt
n := v.Aux
@@ -241,11 +243,11 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpArg, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
v0.AuxInt = off + 4
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
v1.AuxInt = off
v1.Aux = n
v.AddArg(v1)
@@ -253,7 +255,7 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned()
- // result: (Int64Make (Arg <types.Int32> {n} [off]) (Arg <types.UInt32> {n} [off+4]))
+ // result: (Int64Make (Arg <typ.Int32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
@@ -261,11 +263,11 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpArg, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Int32)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
@@ -273,7 +275,7 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned()
- // result: (Int64Make (Arg <types.UInt32> {n} [off]) (Arg <types.UInt32> {n} [off+4]))
+ // result: (Int64Make (Arg <typ.UInt32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
@@ -281,11 +283,11 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpArg, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
@@ -296,27 +298,27 @@ func rewriteValuedec64_OpArg_0(v *Value) bool {
func rewriteValuedec64_OpBitLen64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitLen64 x)
// cond:
- // result: (Add32 <types.Int> (BitLen32 <types.Int> (Int64Hi x)) (BitLen32 <types.Int> (Or32 <types.UInt32> (Int64Lo x) (Zeromask (Int64Hi x)))))
+ // result: (Add32 <typ.Int> (BitLen32 <typ.Int> (Int64Hi x)) (BitLen32 <typ.Int> (Or32 <typ.UInt32> (Int64Lo x) (Zeromask (Int64Hi x)))))
for {
x := v.Args[0]
v.reset(OpAdd32)
- v.Type = types.Int
- v0 := b.NewValue0(v.Pos, OpBitLen32, types.Int)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v.Type = typ.Int
+ v0 := b.NewValue0(v.Pos, OpBitLen32, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpBitLen32, types.Int)
- v3 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpBitLen32, typ.Int)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(x)
v5.AddArg(v6)
v3.AddArg(v5)
@@ -328,21 +330,21 @@ func rewriteValuedec64_OpBitLen64_0(v *Value) bool {
func rewriteValuedec64_OpBswap64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Bswap64 x)
// cond:
- // result: (Int64Make (Bswap32 <types.UInt32> (Int64Lo x)) (Bswap32 <types.UInt32> (Int64Hi x)))
+ // result: (Int64Make (Bswap32 <typ.UInt32> (Int64Lo x)) (Bswap32 <typ.UInt32> (Int64Hi x)))
for {
x := v.Args[0]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpBswap32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpBswap32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v3.AddArg(x)
v2.AddArg(v3)
v.AddArg(v2)
@@ -352,21 +354,21 @@ func rewriteValuedec64_OpBswap64_0(v *Value) bool {
func rewriteValuedec64_OpCom64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Com64 x)
// cond:
- // result: (Int64Make (Com32 <types.UInt32> (Int64Hi x)) (Com32 <types.UInt32> (Int64Lo x)))
+ // result: (Int64Make (Com32 <typ.UInt32> (Int64Hi x)) (Com32 <typ.UInt32> (Int64Lo x)))
for {
x := v.Args[0]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpCom32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpCom32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v3.AddArg(x)
v2.AddArg(v3)
v.AddArg(v2)
@@ -376,11 +378,11 @@ func rewriteValuedec64_OpCom64_0(v *Value) bool {
func rewriteValuedec64_OpConst64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Const64 <t> [c])
// cond: t.IsSigned()
- // result: (Int64Make (Const32 <types.Int32> [c>>32]) (Const32 <types.UInt32> [int64(int32(c))]))
+ // result: (Int64Make (Const32 <typ.Int32> [c>>32]) (Const32 <typ.UInt32> [int64(int32(c))]))
for {
t := v.Type
c := v.AuxInt
@@ -388,17 +390,17 @@ func rewriteValuedec64_OpConst64_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpConst32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
v0.AuxInt = c >> 32
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v1.AuxInt = int64(int32(c))
v.AddArg(v1)
return true
}
// match: (Const64 <t> [c])
// cond: !t.IsSigned()
- // result: (Int64Make (Const32 <types.UInt32> [c>>32]) (Const32 <types.UInt32> [int64(int32(c))]))
+ // result: (Int64Make (Const32 <typ.UInt32> [c>>32]) (Const32 <typ.UInt32> [int64(int32(c))]))
for {
t := v.Type
c := v.AuxInt
@@ -406,10 +408,10 @@ func rewriteValuedec64_OpConst64_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v0.AuxInt = c >> 32
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v1.AuxInt = int64(int32(c))
v.AddArg(v1)
return true
@@ -419,30 +421,30 @@ func rewriteValuedec64_OpConst64_0(v *Value) bool {
func rewriteValuedec64_OpCtz64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Ctz64 x)
// cond:
- // result: (Add32 <types.UInt32> (Ctz32 <types.UInt32> (Int64Lo x)) (And32 <types.UInt32> (Com32 <types.UInt32> (Zeromask (Int64Lo x))) (Ctz32 <types.UInt32> (Int64Hi x))))
+ // result: (Add32 <typ.UInt32> (Ctz32 <typ.UInt32> (Int64Lo x)) (And32 <typ.UInt32> (Com32 <typ.UInt32> (Zeromask (Int64Lo x))) (Ctz32 <typ.UInt32> (Int64Hi x))))
for {
x := v.Args[0]
v.reset(OpAdd32)
- v.Type = types.UInt32
- v0 := b.NewValue0(v.Pos, OpCtz32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpAnd32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpCom32, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
v3.AddArg(v4)
v2.AddArg(v3)
- v6 := b.NewValue0(v.Pos, OpCtz32, types.UInt32)
- v7 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v7.AddArg(x)
v6.AddArg(v7)
v2.AddArg(v6)
@@ -453,8 +455,8 @@ func rewriteValuedec64_OpCtz64_0(v *Value) bool {
func rewriteValuedec64_OpEq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq64 x y)
// cond:
// result: (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Eq32 (Int64Lo x) (Int64Lo y)))
@@ -462,19 +464,19 @@ func rewriteValuedec64_OpEq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpAndB)
- v0 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v4 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
@@ -484,8 +486,8 @@ func rewriteValuedec64_OpEq64_0(v *Value) bool {
func rewriteValuedec64_OpGeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq64 x y)
// cond:
// result: (OrB (Greater32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Geq32U (Int64Lo x) (Int64Lo y))))
@@ -493,28 +495,28 @@ func rewriteValuedec64_OpGeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpGreater32, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpGreater32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, types.Bool)
- v4 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v5 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpGeq32U, types.Bool)
- v8 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpGeq32U, typ.Bool)
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
@@ -525,8 +527,8 @@ func rewriteValuedec64_OpGeq64_0(v *Value) bool {
func rewriteValuedec64_OpGeq64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq64U x y)
// cond:
// result: (OrB (Greater32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Geq32U (Int64Lo x) (Int64Lo y))))
@@ -534,28 +536,28 @@ func rewriteValuedec64_OpGeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpGreater32U, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpGreater32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, types.Bool)
- v4 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v5 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpGeq32U, types.Bool)
- v8 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpGeq32U, typ.Bool)
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
@@ -566,8 +568,8 @@ func rewriteValuedec64_OpGeq64U_0(v *Value) bool {
func rewriteValuedec64_OpGreater64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater64 x y)
// cond:
// result: (OrB (Greater32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Greater32U (Int64Lo x) (Int64Lo y))))
@@ -575,28 +577,28 @@ func rewriteValuedec64_OpGreater64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpGreater32, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpGreater32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, types.Bool)
- v4 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v5 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpGreater32U, types.Bool)
- v8 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpGreater32U, typ.Bool)
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
@@ -607,8 +609,8 @@ func rewriteValuedec64_OpGreater64_0(v *Value) bool {
func rewriteValuedec64_OpGreater64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater64U x y)
// cond:
// result: (OrB (Greater32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Greater32U (Int64Lo x) (Int64Lo y))))
@@ -616,28 +618,28 @@ func rewriteValuedec64_OpGreater64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpGreater32U, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpGreater32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, types.Bool)
- v4 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v5 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpGreater32U, types.Bool)
- v8 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpGreater32U, typ.Bool)
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
@@ -682,8 +684,8 @@ func rewriteValuedec64_OpInt64Lo_0(v *Value) bool {
func rewriteValuedec64_OpLeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq64 x y)
// cond:
// result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
@@ -691,28 +693,28 @@ func rewriteValuedec64_OpLeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpLess32, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, types.Bool)
- v4 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v5 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpLeq32U, types.Bool)
- v8 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool)
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
@@ -723,8 +725,8 @@ func rewriteValuedec64_OpLeq64_0(v *Value) bool {
func rewriteValuedec64_OpLeq64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq64U x y)
// cond:
// result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
@@ -732,28 +734,28 @@ func rewriteValuedec64_OpLeq64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpLess32U, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, types.Bool)
- v4 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v5 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpLeq32U, types.Bool)
- v8 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool)
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
@@ -764,8 +766,8 @@ func rewriteValuedec64_OpLeq64U_0(v *Value) bool {
func rewriteValuedec64_OpLess64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less64 x y)
// cond:
// result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
@@ -773,28 +775,28 @@ func rewriteValuedec64_OpLess64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpLess32, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, types.Bool)
- v4 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v5 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpLess32U, types.Bool)
- v8 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
@@ -805,8 +807,8 @@ func rewriteValuedec64_OpLess64_0(v *Value) bool {
func rewriteValuedec64_OpLess64U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less64U x y)
// cond:
// result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
@@ -814,28 +816,28 @@ func rewriteValuedec64_OpLess64U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpLess32U, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, types.Bool)
- v4 := b.NewValue0(v.Pos, OpEq32, types.Bool)
- v5 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpLess32U, types.Bool)
- v8 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
@@ -848,11 +850,11 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && !config.BigEndian && t.IsSigned()
- // result: (Int64Make (Load <types.Int32> (OffPtr <types.Int32Ptr> [4] ptr) mem) (Load <types.UInt32> ptr mem))
+ // result: (Int64Make (Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem) (Load <typ.UInt32> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -861,14 +863,14 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpLoad, types.Int32)
- v1 := b.NewValue0(v.Pos, OpOffPtr, types.Int32Ptr)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Int32Ptr)
v1.AuxInt = 4
v1.AddArg(ptr)
v0.AddArg(v1)
v0.AddArg(mem)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
v2.AddArg(ptr)
v2.AddArg(mem)
v.AddArg(v2)
@@ -876,7 +878,7 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned()
- // result: (Int64Make (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem) (Load <types.UInt32> ptr mem))
+ // result: (Int64Make (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem) (Load <typ.UInt32> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -885,14 +887,14 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32Ptr)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
v1.AuxInt = 4
v1.AddArg(ptr)
v0.AddArg(v1)
v0.AddArg(mem)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
v2.AddArg(ptr)
v2.AddArg(mem)
v.AddArg(v2)
@@ -900,7 +902,7 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && config.BigEndian && t.IsSigned()
- // result: (Int64Make (Load <types.Int32> ptr mem) (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
+ // result: (Int64Make (Load <typ.Int32> ptr mem) (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -909,12 +911,12 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpLoad, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -924,7 +926,7 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && config.BigEndian && !t.IsSigned()
- // result: (Int64Make (Load <types.UInt32> ptr mem) (Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
+ // result: (Int64Make (Load <typ.UInt32> ptr mem) (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
@@ -933,12 +935,12 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOffPtr, types.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
@@ -951,8 +953,8 @@ func rewriteValuedec64_OpLoad_0(v *Value) bool {
func rewriteValuedec64_OpLsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
@@ -997,7 +999,7 @@ func rewriteValuedec64_OpLsh16x64_0(v *Value) bool {
}
// match: (Lsh16x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Lsh16x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1011,8 +1013,8 @@ func rewriteValuedec64_OpLsh16x64_0(v *Value) bool {
}
v.reset(OpLsh16x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -1024,8 +1026,8 @@ func rewriteValuedec64_OpLsh16x64_0(v *Value) bool {
func rewriteValuedec64_OpLsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
@@ -1070,7 +1072,7 @@ func rewriteValuedec64_OpLsh32x64_0(v *Value) bool {
}
// match: (Lsh32x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Lsh32x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1084,8 +1086,8 @@ func rewriteValuedec64_OpLsh32x64_0(v *Value) bool {
}
v.reset(OpLsh32x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -1097,11 +1099,11 @@ func rewriteValuedec64_OpLsh32x64_0(v *Value) bool {
func rewriteValuedec64_OpLsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x16 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Or32 <types.UInt32> (Or32 <types.UInt32> (Lsh32x16 <types.UInt32> hi s) (Rsh32Ux16 <types.UInt32> lo (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s))) (Lsh32x16 <types.UInt32> lo (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32])))) (Lsh32x16 <types.UInt32> lo s))
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x16 <typ.UInt32> hi s) (Rsh32Ux16 <typ.UInt32> lo (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Lsh32x16 <typ.UInt32> lo (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))) (Lsh32x16 <typ.UInt32> lo s))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -1111,33 +1113,33 @@ func rewriteValuedec64_OpLsh64x16_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpLsh32x16, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
v2.AddArg(hi)
v2.AddArg(s)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
v3.AddArg(lo)
- v4 := b.NewValue0(v.Pos, OpSub16, types.UInt16)
- v5 := b.NewValue0(v.Pos, OpConst16, types.UInt16)
+ v4 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
v5.AuxInt = 32
v4.AddArg(v5)
v4.AddArg(s)
v3.AddArg(v4)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpLsh32x16, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
v6.AddArg(lo)
- v7 := b.NewValue0(v.Pos, OpSub16, types.UInt16)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
v7.AddArg(s)
- v8 := b.NewValue0(v.Pos, OpConst16, types.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
v8.AuxInt = 32
v7.AddArg(v8)
v6.AddArg(v7)
v0.AddArg(v6)
v.AddArg(v0)
- v9 := b.NewValue0(v.Pos, OpLsh32x16, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
v9.AddArg(lo)
v9.AddArg(s)
v.AddArg(v9)
@@ -1148,11 +1150,11 @@ func rewriteValuedec64_OpLsh64x16_0(v *Value) bool {
func rewriteValuedec64_OpLsh64x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x32 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Or32 <types.UInt32> (Or32 <types.UInt32> (Lsh32x32 <types.UInt32> hi s) (Rsh32Ux32 <types.UInt32> lo (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s))) (Lsh32x32 <types.UInt32> lo (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32])))) (Lsh32x32 <types.UInt32> lo s))
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x32 <typ.UInt32> hi s) (Rsh32Ux32 <typ.UInt32> lo (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Lsh32x32 <typ.UInt32> lo (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))) (Lsh32x32 <typ.UInt32> lo s))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -1162,33 +1164,33 @@ func rewriteValuedec64_OpLsh64x32_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpLsh32x32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
v2.AddArg(hi)
v2.AddArg(s)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
v3.AddArg(lo)
- v4 := b.NewValue0(v.Pos, OpSub32, types.UInt32)
- v5 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v5.AuxInt = 32
v4.AddArg(v5)
v4.AddArg(s)
v3.AddArg(v4)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpLsh32x32, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
v6.AddArg(lo)
- v7 := b.NewValue0(v.Pos, OpSub32, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
v7.AddArg(s)
- v8 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v8.AuxInt = 32
v7.AddArg(v8)
v6.AddArg(v7)
v0.AddArg(v6)
v.AddArg(v0)
- v9 := b.NewValue0(v.Pos, OpLsh32x32, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
v9.AddArg(lo)
v9.AddArg(s)
v.AddArg(v9)
@@ -1199,8 +1201,8 @@ func rewriteValuedec64_OpLsh64x32_0(v *Value) bool {
func rewriteValuedec64_OpLsh64x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const64 [0])
@@ -1245,7 +1247,7 @@ func rewriteValuedec64_OpLsh64x64_0(v *Value) bool {
}
// match: (Lsh64x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Lsh64x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1259,8 +1261,8 @@ func rewriteValuedec64_OpLsh64x64_0(v *Value) bool {
}
v.reset(OpLsh64x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -1272,11 +1274,11 @@ func rewriteValuedec64_OpLsh64x64_0(v *Value) bool {
func rewriteValuedec64_OpLsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x8 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Or32 <types.UInt32> (Or32 <types.UInt32> (Lsh32x8 <types.UInt32> hi s) (Rsh32Ux8 <types.UInt32> lo (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s))) (Lsh32x8 <types.UInt32> lo (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32])))) (Lsh32x8 <types.UInt32> lo s))
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x8 <typ.UInt32> hi s) (Rsh32Ux8 <typ.UInt32> lo (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Lsh32x8 <typ.UInt32> lo (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))) (Lsh32x8 <typ.UInt32> lo s))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -1286,33 +1288,33 @@ func rewriteValuedec64_OpLsh64x8_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpLsh32x8, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
v2.AddArg(hi)
v2.AddArg(s)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
v3.AddArg(lo)
- v4 := b.NewValue0(v.Pos, OpSub8, types.UInt8)
- v5 := b.NewValue0(v.Pos, OpConst8, types.UInt8)
+ v4 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
v5.AuxInt = 32
v4.AddArg(v5)
v4.AddArg(s)
v3.AddArg(v4)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpLsh32x8, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
v6.AddArg(lo)
- v7 := b.NewValue0(v.Pos, OpSub8, types.UInt8)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
v7.AddArg(s)
- v8 := b.NewValue0(v.Pos, OpConst8, types.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
v8.AuxInt = 32
v7.AddArg(v8)
v6.AddArg(v7)
v0.AddArg(v6)
v.AddArg(v0)
- v9 := b.NewValue0(v.Pos, OpLsh32x8, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
v9.AddArg(lo)
v9.AddArg(s)
v.AddArg(v9)
@@ -1323,8 +1325,8 @@ func rewriteValuedec64_OpLsh64x8_0(v *Value) bool {
func rewriteValuedec64_OpLsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
@@ -1369,7 +1371,7 @@ func rewriteValuedec64_OpLsh8x64_0(v *Value) bool {
}
// match: (Lsh8x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Lsh8x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1383,8 +1385,8 @@ func rewriteValuedec64_OpLsh8x64_0(v *Value) bool {
}
v.reset(OpLsh8x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -1396,51 +1398,51 @@ func rewriteValuedec64_OpLsh8x64_0(v *Value) bool {
func rewriteValuedec64_OpMul64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul64 x y)
// cond:
- // result: (Int64Make (Add32 <types.UInt32> (Mul32 <types.UInt32> (Int64Lo x) (Int64Hi y)) (Add32 <types.UInt32> (Mul32 <types.UInt32> (Int64Hi x) (Int64Lo y)) (Select0 <types.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 <types.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+ // result: (Int64Make (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Int64Lo x) (Int64Hi y)) (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Int64Hi x) (Int64Lo y)) (Select0 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpAdd32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpMul32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpAdd32, types.UInt32)
- v5 := b.NewValue0(v.Pos, OpMul32, types.UInt32)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v6.AddArg(x)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v7.AddArg(y)
v5.AddArg(v7)
v4.AddArg(v5)
- v8 := b.NewValue0(v.Pos, OpSelect0, types.UInt32)
- v9 := b.NewValue0(v.Pos, OpMul32uhilo, MakeTuple(types.UInt32, types.UInt32))
- v10 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32))
+ v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v10.AddArg(x)
v9.AddArg(v10)
- v11 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v11 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v11.AddArg(y)
v9.AddArg(v11)
v8.AddArg(v9)
v4.AddArg(v8)
v0.AddArg(v4)
v.AddArg(v0)
- v12 := b.NewValue0(v.Pos, OpSelect1, types.UInt32)
- v13 := b.NewValue0(v.Pos, OpMul32uhilo, MakeTuple(types.UInt32, types.UInt32))
- v14 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v12 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32))
+ v14 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v14.AddArg(x)
v13.AddArg(v14)
- v15 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v15 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v15.AddArg(y)
v13.AddArg(v15)
v12.AddArg(v13)
@@ -1468,8 +1470,8 @@ func rewriteValuedec64_OpNeg64_0(v *Value) bool {
func rewriteValuedec64_OpNeq64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq64 x y)
// cond:
// result: (OrB (Neq32 (Int64Hi x) (Int64Hi y)) (Neq32 (Int64Lo x) (Int64Lo y)))
@@ -1477,19 +1479,19 @@ func rewriteValuedec64_OpNeq64_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpNeq32, types.Bool)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpNeq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpNeq32, types.Bool)
- v4 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpNeq32, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
@@ -1499,28 +1501,28 @@ func rewriteValuedec64_OpNeq64_0(v *Value) bool {
func rewriteValuedec64_OpOr64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Or64 x y)
// cond:
- // result: (Int64Make (Or32 <types.UInt32> (Int64Hi x) (Int64Hi y)) (Or32 <types.UInt32> (Int64Lo x) (Int64Lo y)))
+ // result: (Int64Make (Or32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (Or32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
@@ -1530,8 +1532,8 @@ func rewriteValuedec64_OpOr64_0(v *Value) bool {
func rewriteValuedec64_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
@@ -1576,7 +1578,7 @@ func rewriteValuedec64_OpRsh16Ux64_0(v *Value) bool {
}
// match: (Rsh16Ux64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh16Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1590,8 +1592,8 @@ func rewriteValuedec64_OpRsh16Ux64_0(v *Value) bool {
}
v.reset(OpRsh16Ux32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -1603,8 +1605,8 @@ func rewriteValuedec64_OpRsh16Ux64_0(v *Value) bool {
func rewriteValuedec64_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 x (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Signmask (SignExt16to32 x))
@@ -1623,7 +1625,7 @@ func rewriteValuedec64_OpRsh16x64_0(v *Value) bool {
break
}
v.reset(OpSignmask)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1652,7 +1654,7 @@ func rewriteValuedec64_OpRsh16x64_0(v *Value) bool {
}
// match: (Rsh16x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh16x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1666,8 +1668,8 @@ func rewriteValuedec64_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpRsh16x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -1679,8 +1681,8 @@ func rewriteValuedec64_OpRsh16x64_0(v *Value) bool {
func rewriteValuedec64_OpRsh32Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
@@ -1725,7 +1727,7 @@ func rewriteValuedec64_OpRsh32Ux64_0(v *Value) bool {
}
// match: (Rsh32Ux64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh32Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1739,8 +1741,8 @@ func rewriteValuedec64_OpRsh32Ux64_0(v *Value) bool {
}
v.reset(OpRsh32Ux32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -1752,8 +1754,8 @@ func rewriteValuedec64_OpRsh32Ux64_0(v *Value) bool {
func rewriteValuedec64_OpRsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x64 x (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Signmask x)
@@ -1799,7 +1801,7 @@ func rewriteValuedec64_OpRsh32x64_0(v *Value) bool {
}
// match: (Rsh32x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh32x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1813,8 +1815,8 @@ func rewriteValuedec64_OpRsh32x64_0(v *Value) bool {
}
v.reset(OpRsh32x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -1826,11 +1828,11 @@ func rewriteValuedec64_OpRsh32x64_0(v *Value) bool {
func rewriteValuedec64_OpRsh64Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux16 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32Ux16 <types.UInt32> hi s) (Or32 <types.UInt32> (Or32 <types.UInt32> (Rsh32Ux16 <types.UInt32> lo s) (Lsh32x16 <types.UInt32> hi (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s))) (Rsh32Ux16 <types.UInt32> hi (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32])))))
+ // result: (Int64Make (Rsh32Ux16 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> lo s) (Lsh32x16 <typ.UInt32> hi (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Rsh32Ux16 <typ.UInt32> hi (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
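	// The low word becomes (lo>>s) | (hi<<(32-s)) | (hi>>(s-32)). The generic
	// unsigned shift ops yield 0 when the (wrapped) count is >= 32, so for any
	// s exactly one of the three terms carries the shifted-in bits.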
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -1840,31 +1842,31 @@ func rewriteValuedec64_OpRsh64Ux16_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux16, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x16, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub16, types.UInt16)
- v6 := b.NewValue0(v.Pos, OpConst16, types.UInt16)
+ v5 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux16, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
v7.AddArg(hi)
- v8 := b.NewValue0(v.Pos, OpSub16, types.UInt16)
+ v8 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
v8.AddArg(s)
- v9 := b.NewValue0(v.Pos, OpConst16, types.UInt16)
+ v9 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
v9.AuxInt = 32
v8.AddArg(v9)
v7.AddArg(v8)
@@ -1877,11 +1879,11 @@ func rewriteValuedec64_OpRsh64Ux16_0(v *Value) bool {
func rewriteValuedec64_OpRsh64Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux32 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32Ux32 <types.UInt32> hi s) (Or32 <types.UInt32> (Or32 <types.UInt32> (Rsh32Ux32 <types.UInt32> lo s) (Lsh32x32 <types.UInt32> hi (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s))) (Rsh32Ux32 <types.UInt32> hi (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32])))))
+ // result: (Int64Make (Rsh32Ux32 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> lo s) (Lsh32x32 <typ.UInt32> hi (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Rsh32Ux32 <typ.UInt32> hi (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -1891,31 +1893,31 @@ func rewriteValuedec64_OpRsh64Ux32_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub32, types.UInt32)
- v6 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux32, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
v7.AddArg(hi)
- v8 := b.NewValue0(v.Pos, OpSub32, types.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
v8.AddArg(s)
- v9 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v9.AuxInt = 32
v8.AddArg(v9)
v7.AddArg(v8)
@@ -1928,8 +1930,8 @@ func rewriteValuedec64_OpRsh64Ux32_0(v *Value) bool {
func rewriteValuedec64_OpRsh64Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const64 [0])
@@ -1974,7 +1976,7 @@ func rewriteValuedec64_OpRsh64Ux64_0(v *Value) bool {
}
// match: (Rsh64Ux64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh64Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -1988,8 +1990,8 @@ func rewriteValuedec64_OpRsh64Ux64_0(v *Value) bool {
}
v.reset(OpRsh64Ux32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -2001,11 +2003,11 @@ func rewriteValuedec64_OpRsh64Ux64_0(v *Value) bool {
func rewriteValuedec64_OpRsh64Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux8 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32Ux8 <types.UInt32> hi s) (Or32 <types.UInt32> (Or32 <types.UInt32> (Rsh32Ux8 <types.UInt32> lo s) (Lsh32x8 <types.UInt32> hi (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s))) (Rsh32Ux8 <types.UInt32> hi (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32])))))
+ // result: (Int64Make (Rsh32Ux8 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> lo s) (Lsh32x8 <typ.UInt32> hi (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Rsh32Ux8 <typ.UInt32> hi (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -2015,31 +2017,31 @@ func rewriteValuedec64_OpRsh64Ux8_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux8, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x8, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub8, types.UInt8)
- v6 := b.NewValue0(v.Pos, OpConst8, types.UInt8)
+ v5 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux8, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
v7.AddArg(hi)
- v8 := b.NewValue0(v.Pos, OpSub8, types.UInt8)
+ v8 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
v8.AddArg(s)
- v9 := b.NewValue0(v.Pos, OpConst8, types.UInt8)
+ v9 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
v9.AuxInt = 32
v8.AddArg(v9)
v7.AddArg(v8)
@@ -2052,11 +2054,11 @@ func rewriteValuedec64_OpRsh64Ux8_0(v *Value) bool {
func rewriteValuedec64_OpRsh64x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x16 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32x16 <types.UInt32> hi s) (Or32 <types.UInt32> (Or32 <types.UInt32> (Rsh32Ux16 <types.UInt32> lo s) (Lsh32x16 <types.UInt32> hi (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s))) (And32 <types.UInt32> (Rsh32x16 <types.UInt32> hi (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <types.UInt16> s (Const32 <types.UInt32> [5])))))))
+ // result: (Int64Make (Rsh32x16 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> lo s) (Lsh32x16 <typ.UInt32> hi (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (And32 <typ.UInt32> (Rsh32x16 <typ.UInt32> hi (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
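	// Same shape as the unsigned case, except the high word shifts
	// arithmetically and the hi>>(s-32) term must be masked: an arithmetic
	// shift by an oversized count sign-fills rather than producing 0.
	// Zeromask of s>>5 (i.e. s >= 32) makes the And32 a pass-through exactly
	// when that term should be live.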
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -2066,41 +2068,41 @@ func rewriteValuedec64_OpRsh64x16_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32x16, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x16, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub16, types.UInt16)
- v6 := b.NewValue0(v.Pos, OpConst16, types.UInt16)
+ v5 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpAnd32, types.UInt32)
- v8 := b.NewValue0(v.Pos, OpRsh32x16, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
v8.AddArg(hi)
- v9 := b.NewValue0(v.Pos, OpSub16, types.UInt16)
+ v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
v9.AddArg(s)
- v10 := b.NewValue0(v.Pos, OpConst16, types.UInt16)
+ v10 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
v10.AuxInt = 32
v9.AddArg(v10)
v8.AddArg(v9)
v7.AddArg(v8)
- v11 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
- v12 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
- v13 := b.NewValue0(v.Pos, OpRsh16Ux32, types.UInt16)
+ v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v12 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16)
v13.AddArg(s)
- v14 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v14.AuxInt = 5
v13.AddArg(v14)
v12.AddArg(v13)
@@ -2115,11 +2117,11 @@ func rewriteValuedec64_OpRsh64x16_0(v *Value) bool {
func rewriteValuedec64_OpRsh64x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x32 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32x32 <types.UInt32> hi s) (Or32 <types.UInt32> (Or32 <types.UInt32> (Rsh32Ux32 <types.UInt32> lo s) (Lsh32x32 <types.UInt32> hi (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s))) (And32 <types.UInt32> (Rsh32x32 <types.UInt32> hi (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32]))) (Zeromask (Rsh32Ux32 <types.UInt32> s (Const32 <types.UInt32> [5]))))))
+ // result: (Int64Make (Rsh32x32 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> lo s) (Lsh32x32 <typ.UInt32> hi (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (And32 <typ.UInt32> (Rsh32x32 <typ.UInt32> hi (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))) (Zeromask (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -2129,40 +2131,40 @@ func rewriteValuedec64_OpRsh64x32_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32x32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub32, types.UInt32)
- v6 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpAnd32, types.UInt32)
- v8 := b.NewValue0(v.Pos, OpRsh32x32, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
v8.AddArg(hi)
- v9 := b.NewValue0(v.Pos, OpSub32, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
v9.AddArg(s)
- v10 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v10 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v10.AuxInt = 32
v9.AddArg(v10)
v8.AddArg(v9)
v7.AddArg(v8)
- v11 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
- v12 := b.NewValue0(v.Pos, OpRsh32Ux32, types.UInt32)
+ v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v12 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
v12.AddArg(s)
- v13 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v13 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v13.AuxInt = 5
v12.AddArg(v13)
v11.AddArg(v12)
@@ -2176,8 +2178,8 @@ func rewriteValuedec64_OpRsh64x32_0(v *Value) bool {
func rewriteValuedec64_OpRsh64x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x64 x (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
@@ -2196,13 +2198,13 @@ func rewriteValuedec64_OpRsh64x64_0(v *Value) bool {
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
- v3 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v3.AddArg(x)
v2.AddArg(v3)
v.AddArg(v2)
@@ -2232,7 +2234,7 @@ func rewriteValuedec64_OpRsh64x64_0(v *Value) bool {
}
// match: (Rsh64x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh64x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -2246,8 +2248,8 @@ func rewriteValuedec64_OpRsh64x64_0(v *Value) bool {
}
v.reset(OpRsh64x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -2259,11 +2261,11 @@ func rewriteValuedec64_OpRsh64x64_0(v *Value) bool {
func rewriteValuedec64_OpRsh64x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x8 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32x8 <types.UInt32> hi s) (Or32 <types.UInt32> (Or32 <types.UInt32> (Rsh32Ux8 <types.UInt32> lo s) (Lsh32x8 <types.UInt32> hi (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s))) (And32 <types.UInt32> (Rsh32x8 <types.UInt32> hi (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <types.UInt8> s (Const32 <types.UInt32> [5])))))))
+ // result: (Int64Make (Rsh32x8 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> lo s) (Lsh32x8 <typ.UInt32> hi (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (And32 <typ.UInt32> (Rsh32x8 <typ.UInt32> hi (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
@@ -2273,41 +2275,41 @@ func rewriteValuedec64_OpRsh64x8_0(v *Value) bool {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32x8, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x8, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub8, types.UInt8)
- v6 := b.NewValue0(v.Pos, OpConst8, types.UInt8)
+ v5 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpAnd32, types.UInt32)
- v8 := b.NewValue0(v.Pos, OpRsh32x8, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
v8.AddArg(hi)
- v9 := b.NewValue0(v.Pos, OpSub8, types.UInt8)
+ v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
v9.AddArg(s)
- v10 := b.NewValue0(v.Pos, OpConst8, types.UInt8)
+ v10 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
v10.AuxInt = 32
v9.AddArg(v10)
v8.AddArg(v9)
v7.AddArg(v8)
- v11 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
- v12 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
- v13 := b.NewValue0(v.Pos, OpRsh8Ux32, types.UInt8)
+ v11 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v12 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8)
v13.AddArg(s)
- v14 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v14.AuxInt = 5
v13.AddArg(v14)
v12.AddArg(v13)
@@ -2322,8 +2324,8 @@ func rewriteValuedec64_OpRsh64x8_0(v *Value) bool {
func rewriteValuedec64_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
@@ -2368,7 +2370,7 @@ func rewriteValuedec64_OpRsh8Ux64_0(v *Value) bool {
}
// match: (Rsh8Ux64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh8Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -2382,8 +2384,8 @@ func rewriteValuedec64_OpRsh8Ux64_0(v *Value) bool {
}
v.reset(OpRsh8Ux32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -2395,8 +2397,8 @@ func rewriteValuedec64_OpRsh8Ux64_0(v *Value) bool {
func rewriteValuedec64_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x64 x (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Signmask (SignExt8to32 x))
@@ -2415,7 +2417,7 @@ func rewriteValuedec64_OpRsh8x64_0(v *Value) bool {
break
}
v.reset(OpSignmask)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2444,7 +2446,7 @@ func rewriteValuedec64_OpRsh8x64_0(v *Value) bool {
}
// match: (Rsh8x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh8x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
+ // result: (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -2458,8 +2460,8 @@ func rewriteValuedec64_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpRsh8x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeromask, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
@@ -2471,15 +2473,15 @@ func rewriteValuedec64_OpRsh8x64_0(v *Value) bool {
func rewriteValuedec64_OpSignExt16to64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (SignExt16to64 x)
// cond:
// result: (SignExt32to64 (SignExt16to32 x))
for {
x := v.Args[0]
v.reset(OpSignExt32to64)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2488,15 +2490,15 @@ func rewriteValuedec64_OpSignExt16to64_0(v *Value) bool {
func rewriteValuedec64_OpSignExt32to64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (SignExt32to64 x)
// cond:
// result: (Int64Make (Signmask x) x)
for {
x := v.Args[0]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpSignmask, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(x)
@@ -2506,15 +2508,15 @@ func rewriteValuedec64_OpSignExt32to64_0(v *Value) bool {
func rewriteValuedec64_OpSignExt8to64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (SignExt8to64 x)
// cond:
// result: (SignExt32to64 (SignExt8to32 x))
for {
x := v.Args[0]
v.reset(OpSignExt32to64)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2526,7 +2528,7 @@ func rewriteValuedec64_OpStore_0(v *Value) bool {
config := b.Func.Config
_ = config
// match: (Store {t} dst (Int64Make hi lo) mem)
- // cond: t.(Type).Size() == 8 && !config.BigEndian
+ // cond: t.(*types.Type).Size() == 8 && !config.BigEndian
// result: (Store {hi.Type} (OffPtr <hi.Type.PtrTo()> [4] dst) hi (Store {lo.Type} dst lo mem))
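	// Little-endian split: lo is stored at dst and hi four bytes above it,
	// with the inner Store of lo threaded into the outer Store of hi as its
	// memory argument. The BigEndian rule below swaps the two halves.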
for {
t := v.Aux
@@ -2538,7 +2540,7 @@ func rewriteValuedec64_OpStore_0(v *Value) bool {
hi := v_1.Args[0]
lo := v_1.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && !config.BigEndian) {
+ if !(t.(*types.Type).Size() == 8 && !config.BigEndian) {
break
}
v.reset(OpStore)
@@ -2548,7 +2550,7 @@ func rewriteValuedec64_OpStore_0(v *Value) bool {
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(hi)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = lo.Type
v1.AddArg(dst)
v1.AddArg(lo)
@@ -2557,7 +2559,7 @@ func rewriteValuedec64_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} dst (Int64Make hi lo) mem)
- // cond: t.(Type).Size() == 8 && config.BigEndian
+ // cond: t.(*types.Type).Size() == 8 && config.BigEndian
// result: (Store {lo.Type} (OffPtr <lo.Type.PtrTo()> [4] dst) lo (Store {hi.Type} dst hi mem))
for {
t := v.Aux
@@ -2569,7 +2571,7 @@ func rewriteValuedec64_OpStore_0(v *Value) bool {
hi := v_1.Args[0]
lo := v_1.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && config.BigEndian) {
+ if !(t.(*types.Type).Size() == 8 && config.BigEndian) {
break
}
v.reset(OpStore)
@@ -2579,7 +2581,7 @@ func rewriteValuedec64_OpStore_0(v *Value) bool {
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(lo)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = hi.Type
v1.AddArg(dst)
v1.AddArg(hi)
@@ -2592,39 +2594,39 @@ func rewriteValuedec64_OpStore_0(v *Value) bool {
func rewriteValuedec64_OpSub64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Sub64 x y)
// cond:
- // result: (Int64Make (Sub32withcarry <types.Int32> (Int64Hi x) (Int64Hi y) (Select1 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 <types.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ // result: (Int64Make (Sub32withcarry <typ.Int32> (Int64Hi x) (Int64Hi y) (Select1 <types.TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 <typ.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
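	// Sub32carry computes the low words and returns a (difference, borrow)
	// tuple; Select0 extracts the 32-bit result and Select1 the borrow flag,
	// which Sub32withcarry then consumes when subtracting the high words.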
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpSub32withcarry, types.Int32)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
- v4 := b.NewValue0(v.Pos, OpSub32carry, MakeTuple(types.UInt32, TypeFlags))
- v5 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
v0.AddArg(v3)
v.AddArg(v0)
- v7 := b.NewValue0(v.Pos, OpSelect0, types.UInt32)
- v8 := b.NewValue0(v.Pos, OpSub32carry, MakeTuple(types.UInt32, TypeFlags))
- v9 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v9.AddArg(x)
v8.AddArg(v9)
- v10 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v10 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v10.AddArg(y)
v8.AddArg(v10)
v7.AddArg(v8)
@@ -2684,28 +2686,28 @@ func rewriteValuedec64_OpTrunc64to8_0(v *Value) bool {
func rewriteValuedec64_OpXor64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Xor64 x y)
// cond:
- // result: (Int64Make (Xor32 <types.UInt32> (Int64Hi x) (Int64Hi y)) (Xor32 <types.UInt32> (Int64Lo x) (Int64Lo y)))
+ // result: (Int64Make (Xor32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (Xor32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpXor32, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpXor32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpXor32, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpXor32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
@@ -2715,15 +2717,15 @@ func rewriteValuedec64_OpXor64_0(v *Value) bool {
func rewriteValuedec64_OpZeroExt16to64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ZeroExt16to64 x)
// cond:
// result: (ZeroExt32to64 (ZeroExt16to32 x))
for {
x := v.Args[0]
v.reset(OpZeroExt32to64)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2732,15 +2734,15 @@ func rewriteValuedec64_OpZeroExt16to64_0(v *Value) bool {
func rewriteValuedec64_OpZeroExt32to64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ZeroExt32to64 x)
// cond:
- // result: (Int64Make (Const32 <types.UInt32> [0]) x)
+ // result: (Int64Make (Const32 <typ.UInt32> [0]) x)
for {
x := v.Args[0]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
@@ -2750,15 +2752,15 @@ func rewriteValuedec64_OpZeroExt32to64_0(v *Value) bool {
func rewriteValuedec64_OpZeroExt8to64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ZeroExt8to64 x)
// cond:
// result: (ZeroExt32to64 (ZeroExt8to32 x))
for {
x := v.Args[0]
v.reset(OpZeroExt32to64)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2769,8 +2771,8 @@ func rewriteBlockdec64(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
}
return false
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 43a15212cd..f41d3fa151 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValuegeneric(v *Value) bool {
switch v.Op {
@@ -5099,11 +5101,11 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Arg {n} [off])
// cond: v.Type.IsString()
- // result: (StringMake (Arg <types.BytePtr> {n} [off]) (Arg <types.Int> {n} [off+config.PtrSize]))
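	// A string-typed argument is decomposed into its two machine words:
	// the data pointer at off and the length one pointer size above it.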
+ // result: (StringMake (Arg <typ.BytePtr> {n} [off]) (Arg <typ.Int> {n} [off+config.PtrSize]))
for {
off := v.AuxInt
n := v.Aux
@@ -5111,11 +5113,11 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpArg, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.Int)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
@@ -5123,7 +5125,7 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
}
// match: (Arg {n} [off])
// cond: v.Type.IsSlice()
- // result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <types.Int> {n} [off+config.PtrSize]) (Arg <types.Int> {n} [off+2*config.PtrSize]))
+ // result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <typ.Int> {n} [off+config.PtrSize]) (Arg <typ.Int> {n} [off+2*config.PtrSize]))
for {
off := v.AuxInt
n := v.Aux
@@ -5135,11 +5137,11 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.Int)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpArg, types.Int)
+ v2 := b.NewValue0(v.Pos, OpArg, typ.Int)
v2.AuxInt = off + 2*config.PtrSize
v2.Aux = n
v.AddArg(v2)
@@ -5147,7 +5149,7 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
}
// match: (Arg {n} [off])
// cond: v.Type.IsInterface()
- // result: (IMake (Arg <types.BytePtr> {n} [off]) (Arg <types.BytePtr> {n} [off+config.PtrSize]))
+ // result: (IMake (Arg <typ.BytePtr> {n} [off]) (Arg <typ.BytePtr> {n} [off+config.PtrSize]))
for {
off := v.AuxInt
n := v.Aux
@@ -5155,11 +5157,11 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
break
}
v.reset(OpIMake)
- v0 := b.NewValue0(v.Pos, OpArg, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
@@ -5167,7 +5169,7 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
}
// match: (Arg {n} [off])
// cond: v.Type.IsComplex() && v.Type.Size() == 16
- // result: (ComplexMake (Arg <types.Float64> {n} [off]) (Arg <types.Float64> {n} [off+8]))
+ // result: (ComplexMake (Arg <typ.Float64> {n} [off]) (Arg <typ.Float64> {n} [off+8]))
for {
off := v.AuxInt
n := v.Aux
@@ -5175,11 +5177,11 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
break
}
v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpArg, types.Float64)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Float64)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.Float64)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.Float64)
v1.AuxInt = off + 8
v1.Aux = n
v.AddArg(v1)
@@ -5187,7 +5189,7 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
}
// match: (Arg {n} [off])
// cond: v.Type.IsComplex() && v.Type.Size() == 8
- // result: (ComplexMake (Arg <types.Float32> {n} [off]) (Arg <types.Float32> {n} [off+4]))
+ // result: (ComplexMake (Arg <typ.Float32> {n} [off]) (Arg <typ.Float32> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
@@ -5195,11 +5197,11 @@ func rewriteValuegeneric_OpArg_0(v *Value) bool {
break
}
v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpArg, types.Float32)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Float32)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, types.Float32)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.Float32)
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
@@ -5519,16 +5521,16 @@ func rewriteValuegeneric_OpCom8_0(v *Value) bool {
func rewriteValuegeneric_OpConstInterface_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ConstInterface)
// cond:
- // result: (IMake (ConstNil <types.BytePtr>) (ConstNil <types.BytePtr>))
+ // result: (IMake (ConstNil <typ.BytePtr>) (ConstNil <typ.BytePtr>))
for {
v.reset(OpIMake)
- v0 := b.NewValue0(v.Pos, OpConstNil, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConstNil, types.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
v.AddArg(v1)
return true
}
@@ -5538,11 +5540,11 @@ func rewriteValuegeneric_OpConstSlice_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ConstSlice)
// cond: config.PtrSize == 4
- // result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const32 <types.Int> [0]) (Const32 <types.Int> [0]))
+ // result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const32 <typ.Int> [0]) (Const32 <typ.Int> [0]))
for {
if !(config.PtrSize == 4) {
break
@@ -5550,17 +5552,17 @@ func rewriteValuegeneric_OpConstSlice_0(v *Value) bool {
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.ElemType().PtrTo())
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst32, types.Int)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpConst32, types.Int)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int)
v2.AuxInt = 0
v.AddArg(v2)
return true
}
// match: (ConstSlice)
// cond: config.PtrSize == 8
- // result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const64 <types.Int> [0]) (Const64 <types.Int> [0]))
+ // result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const64 <typ.Int> [0]) (Const64 <typ.Int> [0]))
for {
if !(config.PtrSize == 8) {
break
@@ -5568,10 +5570,10 @@ func rewriteValuegeneric_OpConstSlice_0(v *Value) bool {
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.ElemType().PtrTo())
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst64, types.Int)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpConst64, types.Int)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.Int)
v2.AuxInt = 0
v.AddArg(v2)
return true
@@ -5585,74 +5587,74 @@ func rewriteValuegeneric_OpConstString_0(v *Value) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ConstString {s})
// cond: config.PtrSize == 4 && s.(string) == ""
- // result: (StringMake (ConstNil) (Const32 <types.Int> [0]))
+ // result: (StringMake (ConstNil) (Const32 <typ.Int> [0]))
for {
s := v.Aux
if !(config.PtrSize == 4 && s.(string) == "") {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpConstNil, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst32, types.Int)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
v1.AuxInt = 0
v.AddArg(v1)
return true
}
// match: (ConstString {s})
// cond: config.PtrSize == 8 && s.(string) == ""
- // result: (StringMake (ConstNil) (Const64 <types.Int> [0]))
+ // result: (StringMake (ConstNil) (Const64 <typ.Int> [0]))
for {
s := v.Aux
if !(config.PtrSize == 8 && s.(string) == "") {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpConstNil, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst64, types.Int)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
v1.AuxInt = 0
v.AddArg(v1)
return true
}
// match: (ConstString {s})
// cond: config.PtrSize == 4 && s.(string) != ""
- // result: (StringMake (Addr <types.BytePtr> {fe.StringData(s.(string))} (SB)) (Const32 <types.Int> [int64(len(s.(string)))]))
+ // result: (StringMake (Addr <typ.BytePtr> {fe.StringData(s.(string))} (SB)) (Const32 <typ.Int> [int64(len(s.(string)))]))
for {
s := v.Aux
if !(config.PtrSize == 4 && s.(string) != "") {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpAddr, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr)
v0.Aux = fe.StringData(s.(string))
- v1 := b.NewValue0(v.Pos, OpSB, types.Uintptr)
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst32, types.Int)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int)
v2.AuxInt = int64(len(s.(string)))
v.AddArg(v2)
return true
}
// match: (ConstString {s})
// cond: config.PtrSize == 8 && s.(string) != ""
- // result: (StringMake (Addr <types.BytePtr> {fe.StringData(s.(string))} (SB)) (Const64 <types.Int> [int64(len(s.(string)))]))
+ // result: (StringMake (Addr <typ.BytePtr> {fe.StringData(s.(string))} (SB)) (Const64 <typ.Int> [int64(len(s.(string)))]))
for {
s := v.Aux
if !(config.PtrSize == 8 && s.(string) != "") {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpAddr, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr)
v0.Aux = fe.StringData(s.(string))
- v1 := b.NewValue0(v.Pos, OpSB, types.Uintptr)
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, types.Int)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.Int)
v2.AuxInt = int64(len(s.(string)))
v.AddArg(v2)
return true
@@ -5761,8 +5763,8 @@ func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool {
func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 (Const16 [c]) (Const16 [d]))
// cond: d != 0
// result: (Const16 [int64(int16(c)/int16(d))])
@@ -5809,7 +5811,7 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
}
// match: (Div16 <t> x (Const16 [-1<<15]))
// cond:
- // result: (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <types.UInt64> [15]))
+ // result: (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
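	// x / (-1<<15) is 1 only when x == -1<<15. x & -x isolates the lowest set
	// bit, whose bit 15 is set exactly in that case, so the unsigned shift by
	// 15 yields the 0-or-1 quotient directly.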
for {
t := v.Type
x := v.Args[0]
@@ -5827,14 +5829,14 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 15
v.AddArg(v2)
return true
}
// match: (Div16 <t> n (Const16 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <types.UInt64> [15])) (Const64 <types.UInt64> [16-log2(c)]))) (Const64 <types.UInt64> [log2(c)]))
+ // result: (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [16-log2(c)]))) (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
@@ -5852,23 +5854,23 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
v2 := b.NewValue0(v.Pos, OpRsh16x64, t)
v2.AddArg(n)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = 15
v2.AddArg(v3)
v1.AddArg(v2)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 16 - log2(c)
v1.AddArg(v4)
v0.AddArg(v1)
v.AddArg(v0)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = log2(c)
v.AddArg(v5)
return true
}
// match: (Div16 <t> x (Const16 [c]))
// cond: smagicOK(16,c)
- // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <types.UInt32> (Const32 <types.UInt32> [int64(smagic(16,c).m)]) (SignExt16to32 x)) (Const64 <types.UInt64> [16+smagic(16,c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <types.UInt64> [31])))
+ // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(smagic(16,c).m)]) (SignExt16to32 x)) (Const64 <typ.UInt64> [16+smagic(16,c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <typ.UInt64> [31])))
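	// Signed magic-number division: multiply the sign-extended dividend by
	// the precomputed magic constant m, arithmetic-shift right by 16+s, then
	// subtract x>>31 (0 or -1) to correct the rounding for negative x.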
for {
t := v.Type
x := v.Args[0]
@@ -5883,23 +5885,23 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool {
v.reset(OpSub16)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
- v1 := b.NewValue0(v.Pos, OpMul32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v2.AuxInt = int64(smagic(16, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 16 + smagic(16, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
- v6 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v6 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v6.AddArg(x)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v7.AuxInt = 31
v5.AddArg(v7)
v.AddArg(v5)
@@ -5912,8 +5914,8 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u (Const16 [c]) (Const16 [d]))
// cond: d != 0
// result: (Const16 [int64(int16(uint16(c)/uint16(d)))])
@@ -5937,7 +5939,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
}
// match: (Div16u n (Const16 [c]))
// cond: isPowerOfTwo(c&0xffff)
- // result: (Rsh16Ux64 n (Const64 <types.UInt64> [log2(c&0xffff)]))
+ // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log2(c&0xffff)]))
for {
n := v.Args[0]
v_1 := v.Args[1]
@@ -5950,14 +5952,14 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
}
v.reset(OpRsh16Ux64)
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c & 0xffff)
v.AddArg(v0)
return true
}
// match: (Div16u x (Const16 [c]))
// cond: umagicOK(16, c) && config.RegSize == 8
- // result: (Trunc64to16 (Rsh64Ux64 <types.UInt64> (Mul64 <types.UInt64> (Const64 <types.UInt64> [int64(1<<16+umagic(16,c).m)]) (ZeroExt16to64 x)) (Const64 <types.UInt64> [16+umagic(16,c).s])))
+ // result: (Trunc64to16 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<16+umagic(16,c).m)]) (ZeroExt16to64 x)) (Const64 <typ.UInt64> [16+umagic(16,c).s])))
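	// Unsigned magic-number division on 64-bit registers: zero-extend x,
	// multiply by 1<<16 + m, shift right by 16+s, and truncate; the quotient
	// of a 16-bit dividend fits back in 16 bits.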
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -5969,16 +5971,16 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
break
}
v.reset(OpTrunc64to16)
- v0 := b.NewValue0(v.Pos, OpRsh64Ux64, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpMul64, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = int64(1<<16 + umagic(16, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 16 + umagic(16, c).s
v0.AddArg(v4)
v.AddArg(v0)
@@ -5986,7 +5988,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
}
// match: (Div16u x (Const16 [c]))
// cond: umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0
- // result: (Trunc32to16 (Rsh32Ux64 <types.UInt32> (Mul32 <types.UInt32> (Const32 <types.UInt32> [int64(1<<15+umagic(16,c).m/2)]) (ZeroExt16to32 x)) (Const64 <types.UInt64> [16+umagic(16,c).s-1])))
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(1<<15+umagic(16,c).m/2)]) (ZeroExt16to32 x)) (Const64 <typ.UInt64> [16+umagic(16,c).s-1])))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -5998,16 +6000,16 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
break
}
v.reset(OpTrunc32to16)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux64, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpMul32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v2.AuxInt = int64(1<<15 + umagic(16, c).m/2)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 16 + umagic(16, c).s - 1
v0.AddArg(v4)
v.AddArg(v0)
@@ -6015,7 +6017,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
}
// match: (Div16u x (Const16 [c]))
// cond: umagicOK(16, c) && config.RegSize == 4 && c&1 == 0
- // result: (Trunc32to16 (Rsh32Ux64 <types.UInt32> (Mul32 <types.UInt32> (Const32 <types.UInt32> [int64(1<<15+(umagic(16,c).m+1)/2)]) (Rsh32Ux64 <types.UInt32> (ZeroExt16to32 x) (Const64 <types.UInt64> [1]))) (Const64 <types.UInt64> [16+umagic(16,c).s-2])))
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(1<<15+(umagic(16,c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [16+umagic(16,c).s-2])))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6027,21 +6029,21 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
break
}
v.reset(OpTrunc32to16)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux64, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpMul32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v2.AuxInt = int64(1<<15 + (umagic(16, c).m+1)/2)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux64, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 1
v3.AddArg(v5)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v6.AuxInt = 16 + umagic(16, c).s - 2
v0.AddArg(v6)
v.AddArg(v0)
@@ -6049,7 +6051,7 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
}
// match: (Div16u x (Const16 [c]))
// cond: umagicOK(16, c) && config.RegSize == 4
- // result: (Trunc32to16 (Rsh32Ux64 <types.UInt32> (Avg32u (Lsh32x64 <types.UInt32> (ZeroExt16to32 x) (Const64 <types.UInt64> [16])) (Mul32 <types.UInt32> (Const32 <types.UInt32> [int64(umagic(16,c).m)]) (ZeroExt16to32 x))) (Const64 <types.UInt64> [16+umagic(16,c).s-1])))
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Avg32u (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16])) (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(umagic(16,c).m)]) (ZeroExt16to32 x))) (Const64 <typ.UInt64> [16+umagic(16,c).s-1])))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6061,26 +6063,26 @@ func rewriteValuegeneric_OpDiv16u_0(v *Value) bool {
break
}
v.reset(OpTrunc32to16)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux64, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpAvg32u, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpLsh32x64, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 16
v2.AddArg(v4)
v1.AddArg(v2)
- v5 := b.NewValue0(v.Pos, OpMul32, types.UInt32)
- v6 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v6.AuxInt = int64(umagic(16, c).m)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v7.AddArg(x)
v5.AddArg(v7)
v1.AddArg(v5)
v0.AddArg(v1)
- v8 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v8 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v8.AuxInt = 16 + umagic(16, c).s - 1
v0.AddArg(v8)
v.AddArg(v0)
@@ -6093,8 +6095,8 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32 (Const32 [c]) (Const32 [d]))
// cond: d != 0
// result: (Const32 [int64(int32(c)/int32(d))])
@@ -6141,7 +6143,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
}
// match: (Div32 <t> x (Const32 [-1<<31]))
// cond:
- // result: (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <types.UInt64> [31]))
+ // result: (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
for {
t := v.Type
x := v.Args[0]
@@ -6159,14 +6161,14 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 31
v.AddArg(v2)
return true
}
// match: (Div32 <t> n (Const32 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <types.UInt64> [31])) (Const64 <types.UInt64> [32-log2(c)]))) (Const64 <types.UInt64> [log2(c)]))
+ // result: (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [32-log2(c)]))) (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
@@ -6184,23 +6186,23 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
v2 := b.NewValue0(v.Pos, OpRsh32x64, t)
v2.AddArg(n)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = 31
v2.AddArg(v3)
v1.AddArg(v2)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 32 - log2(c)
v1.AddArg(v4)
v0.AddArg(v1)
v.AddArg(v0)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = log2(c)
v.AddArg(v5)
return true
}
// match: (Div32 <t> x (Const32 [c]))
// cond: smagicOK(32,c) && config.RegSize == 8
- // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <types.UInt64> (Const64 <types.UInt64> [int64(smagic(32,c).m)]) (SignExt32to64 x)) (Const64 <types.UInt64> [32+smagic(32,c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <types.UInt64> [63])))
+ // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(smagic(32,c).m)]) (SignExt32to64 x)) (Const64 <typ.UInt64> [32+smagic(32,c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <typ.UInt64> [63])))
for {
t := v.Type
x := v.Args[0]
@@ -6215,23 +6217,23 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
v.reset(OpSub32)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
- v1 := b.NewValue0(v.Pos, OpMul64, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = int64(smagic(32, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 32 + smagic(32, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
- v6 := b.NewValue0(v.Pos, OpSignExt32to64, types.Int64)
+ v6 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
v6.AddArg(x)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v7.AuxInt = 63
v5.AddArg(v7)
v.AddArg(v5)
@@ -6239,7 +6241,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
}
// match: (Div32 <t> x (Const32 [c]))
// cond: smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 == 0
- // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <types.UInt32> [int64(int32(smagic(32,c).m/2))]) x) (Const64 <types.UInt64> [smagic(32,c).s-1])) (Rsh32x64 <t> x (Const64 <types.UInt64> [31])))
+ // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int64(int32(smagic(32,c).m/2))]) x) (Const64 <typ.UInt64> [smagic(32,c).s-1])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
for {
t := v.Type
x := v.Args[0]
@@ -6255,18 +6257,18 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
v1 := b.NewValue0(v.Pos, OpHmul32, t)
- v2 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v2.AuxInt = int64(int32(smagic(32, c).m / 2))
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = smagic(32, c).s - 1
v0.AddArg(v3)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpRsh32x64, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 31
v4.AddArg(v5)
v.AddArg(v4)
@@ -6274,7 +6276,7 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
}
// match: (Div32 <t> x (Const32 [c]))
// cond: smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 != 0
- // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <types.UInt32> [int64(int32(smagic(32,c).m))]) x) x) (Const64 <types.UInt64> [smagic(32,c).s])) (Rsh32x64 <t> x (Const64 <types.UInt64> [31])))
+ // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int64(int32(smagic(32,c).m))]) x) x) (Const64 <typ.UInt64> [smagic(32,c).s])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
for {
t := v.Type
x := v.Args[0]
@@ -6291,20 +6293,20 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
v1 := b.NewValue0(v.Pos, OpAdd32, t)
v2 := b.NewValue0(v.Pos, OpHmul32, t)
- v3 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v3.AuxInt = int64(int32(smagic(32, c).m))
v2.AddArg(v3)
v2.AddArg(x)
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = smagic(32, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
v5.AddArg(x)
- v6 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v6.AuxInt = 31
v5.AddArg(v6)
v.AddArg(v5)
@@ -6361,8 +6363,8 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32u (Const32 [c]) (Const32 [d]))
// cond: d != 0
// result: (Const32 [int64(int32(uint32(c)/uint32(d)))])
@@ -6386,7 +6388,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
}
// match: (Div32u n (Const32 [c]))
// cond: isPowerOfTwo(c&0xffffffff)
- // result: (Rsh32Ux64 n (Const64 <types.UInt64> [log2(c&0xffffffff)]))
+ // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log2(c&0xffffffff)]))
for {
n := v.Args[0]
v_1 := v.Args[1]
@@ -6399,14 +6401,14 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
}
v.reset(OpRsh32Ux64)
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c & 0xffffffff)
v.AddArg(v0)
return true
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0
- // result: (Rsh32Ux64 <types.UInt32> (Hmul32u <types.UInt32> (Const32 <types.UInt32> [int64(int32(1<<31+umagic(32,c).m/2))]) x) (Const64 <types.UInt64> [umagic(32,c).s-1]))
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(1<<31+umagic(32,c).m/2))]) x) (Const64 <typ.UInt64> [umagic(32,c).s-1]))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6418,21 +6420,21 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
break
}
v.reset(OpRsh32Ux64)
- v.Type = types.UInt32
- v0 := b.NewValue0(v.Pos, OpHmul32u, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v1.AuxInt = int64(int32(1<<31 + umagic(32, c).m/2))
v0.AddArg(v1)
v0.AddArg(x)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = umagic(32, c).s - 1
v.AddArg(v2)
return true
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 4 && c&1 == 0
- // result: (Rsh32Ux64 <types.UInt32> (Hmul32u <types.UInt32> (Const32 <types.UInt32> [int64(int32(1<<31+(umagic(32,c).m+1)/2))]) (Rsh32Ux64 <types.UInt32> x (Const64 <types.UInt64> [1]))) (Const64 <types.UInt64> [umagic(32,c).s-2]))
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(1<<31+(umagic(32,c).m+1)/2))]) (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic(32,c).s-2]))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6444,26 +6446,26 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
break
}
v.reset(OpRsh32Ux64)
- v.Type = types.UInt32
- v0 := b.NewValue0(v.Pos, OpHmul32u, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v1.AuxInt = int64(int32(1<<31 + (umagic(32, c).m+1)/2))
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpRsh32Ux64, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
v2.AddArg(x)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = 1
v2.AddArg(v3)
v0.AddArg(v2)
v.AddArg(v0)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = umagic(32, c).s - 2
v.AddArg(v4)
return true
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 4
- // result: (Rsh32Ux64 <types.UInt32> (Avg32u x (Hmul32u <types.UInt32> (Const32 <types.UInt32> [int64(int32(umagic(32,c).m))]) x)) (Const64 <types.UInt64> [umagic(32,c).s-1]))
+ // result: (Rsh32Ux64 <typ.UInt32> (Avg32u x (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int64(int32(umagic(32,c).m))]) x)) (Const64 <typ.UInt64> [umagic(32,c).s-1]))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6475,24 +6477,24 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
break
}
v.reset(OpRsh32Ux64)
- v.Type = types.UInt32
- v0 := b.NewValue0(v.Pos, OpAvg32u, types.UInt32)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpHmul32u, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v2.AuxInt = int64(int32(umagic(32, c).m))
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = umagic(32, c).s - 1
v.AddArg(v3)
return true
}
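
The rule just closed is the unsigned fallback for magic constants that need 33 bits: the missing top bit is recovered through Avg32u, which computes (x + hi)/2 without overflowing 32 bits, and the final shift drops to s-1 to compensate. A sketch assuming m, s = umagic(32, c) with s >= 1 in this branch:

func udiv32magic(x, m uint32, s uint) uint32 {
	hi := uint32((uint64(m) * uint64(x)) >> 32) // Hmul32u
	avg := hi + (x-hi)/2                        // Avg32u; hi <= x holds
	return avg >> (s - 1)
}
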
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0
- // result: (Trunc64to32 (Rsh64Ux64 <types.UInt64> (Mul64 <types.UInt64> (Const64 <types.UInt64> [int64(1<<31+umagic(32,c).m/2)]) (ZeroExt32to64 x)) (Const64 <types.UInt64> [32+umagic(32,c).s-1])))
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+umagic(32,c).m/2)]) (ZeroExt32to64 x)) (Const64 <typ.UInt64> [32+umagic(32,c).s-1])))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6504,16 +6506,16 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
break
}
v.reset(OpTrunc64to32)
- v0 := b.NewValue0(v.Pos, OpRsh64Ux64, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpMul64, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = int64(1<<31 + umagic(32, c).m/2)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 32 + umagic(32, c).s - 1
v0.AddArg(v4)
v.AddArg(v0)
@@ -6521,7 +6523,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 8 && c&1 == 0
- // result: (Trunc64to32 (Rsh64Ux64 <types.UInt64> (Mul64 <types.UInt64> (Const64 <types.UInt64> [int64(1<<31+(umagic(32,c).m+1)/2)]) (Rsh64Ux64 <types.UInt64> (ZeroExt32to64 x) (Const64 <types.UInt64> [1]))) (Const64 <types.UInt64> [32+umagic(32,c).s-2])))
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+(umagic(32,c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [32+umagic(32,c).s-2])))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6533,21 +6535,21 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
break
}
v.reset(OpTrunc64to32)
- v0 := b.NewValue0(v.Pos, OpRsh64Ux64, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpMul64, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = int64(1<<31 + (umagic(32, c).m+1)/2)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh64Ux64, types.UInt64)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 1
v3.AddArg(v5)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v6.AuxInt = 32 + umagic(32, c).s - 2
v0.AddArg(v6)
v.AddArg(v0)
@@ -6555,7 +6557,7 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 8
- // result: (Trunc64to32 (Rsh64Ux64 <types.UInt64> (Avg64u (Lsh64x64 <types.UInt64> (ZeroExt32to64 x) (Const64 <types.UInt64> [32])) (Mul64 <types.UInt64> (Const64 <types.UInt32> [int64(umagic(32,c).m)]) (ZeroExt32to64 x))) (Const64 <types.UInt64> [32+umagic(32,c).s-1])))
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (Const64 <typ.UInt32> [int64(umagic(32,c).m)]) (ZeroExt32to64 x))) (Const64 <typ.UInt64> [32+umagic(32,c).s-1])))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6567,26 +6569,26 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
break
}
v.reset(OpTrunc64to32)
- v0 := b.NewValue0(v.Pos, OpRsh64Ux64, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpAvg64u, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpLsh64x64, types.UInt64)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 32
v2.AddArg(v4)
v1.AddArg(v2)
- v5 := b.NewValue0(v.Pos, OpMul64, types.UInt64)
- v6 := b.NewValue0(v.Pos, OpConst64, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32)
v6.AuxInt = int64(umagic(32, c).m)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpZeroExt32to64, types.UInt64)
+ v7 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
v7.AddArg(x)
v5.AddArg(v7)
v1.AddArg(v5)
v0.AddArg(v1)
- v8 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v8 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v8.AuxInt = 32 + umagic(32, c).s - 1
v0.AddArg(v8)
v.AddArg(v0)
@@ -6597,8 +6599,8 @@ func rewriteValuegeneric_OpDiv32u_0(v *Value) bool {
func rewriteValuegeneric_OpDiv64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div64 (Const64 [c]) (Const64 [d]))
// cond: d != 0
// result: (Const64 [c/d])
@@ -6645,7 +6647,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool {
}
// match: (Div64 <t> x (Const64 [-1<<63]))
// cond:
- // result: (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <types.UInt64> [63]))
+ // result: (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
for {
t := v.Type
x := v.Args[0]
@@ -6663,14 +6665,14 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool {
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 63
v.AddArg(v2)
return true
}
// match: (Div64 <t> n (Const64 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <types.UInt64> [63])) (Const64 <types.UInt64> [64-log2(c)]))) (Const64 <types.UInt64> [log2(c)]))
+ // result: (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [64-log2(c)]))) (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
@@ -6688,23 +6690,23 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
v2 := b.NewValue0(v.Pos, OpRsh64x64, t)
v2.AddArg(n)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = 63
v2.AddArg(v3)
v1.AddArg(v2)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 64 - log2(c)
v1.AddArg(v4)
v0.AddArg(v1)
v.AddArg(v0)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = log2(c)
v.AddArg(v5)
return true
}
// match: (Div64 <t> x (Const64 [c]))
// cond: smagicOK(64,c) && smagic(64,c).m&1 == 0
- // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <types.UInt64> [int64(smagic(64,c).m/2)]) x) (Const64 <types.UInt64> [smagic(64,c).s-1])) (Rsh64x64 <t> x (Const64 <types.UInt64> [63])))
+ // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic(64,c).m/2)]) x) (Const64 <typ.UInt64> [smagic(64,c).s-1])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
for {
t := v.Type
x := v.Args[0]
@@ -6720,18 +6722,18 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool {
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
v1 := b.NewValue0(v.Pos, OpHmul64, t)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = int64(smagic(64, c).m / 2)
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = smagic(64, c).s - 1
v0.AddArg(v3)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpRsh64x64, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = 63
v4.AddArg(v5)
v.AddArg(v4)
@@ -6739,7 +6741,7 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool {
}
// match: (Div64 <t> x (Const64 [c]))
// cond: smagicOK(64,c) && smagic(64,c).m&1 != 0
- // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <types.UInt64> [int64(smagic(64,c).m)]) x) x) (Const64 <types.UInt64> [smagic(64,c).s])) (Rsh64x64 <t> x (Const64 <types.UInt64> [63])))
+ // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic(64,c).m)]) x) x) (Const64 <typ.UInt64> [smagic(64,c).s])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
for {
t := v.Type
x := v.Args[0]
@@ -6756,20 +6758,20 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
v1 := b.NewValue0(v.Pos, OpAdd64, t)
v2 := b.NewValue0(v.Pos, OpHmul64, t)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = int64(smagic(64, c).m)
v2.AddArg(v3)
v2.AddArg(x)
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = smagic(64, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
v5.AddArg(x)
- v6 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v6.AuxInt = 63
v5.AddArg(v6)
v.AddArg(v5)
@@ -6826,8 +6828,8 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div64u (Const64 [c]) (Const64 [d]))
// cond: d != 0
// result: (Const64 [int64(uint64(c)/uint64(d))])
@@ -6851,7 +6853,7 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool {
}
// match: (Div64u n (Const64 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh64Ux64 n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log2(c)]))
for {
n := v.Args[0]
v_1 := v.Args[1]
@@ -6864,14 +6866,14 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool {
}
v.reset(OpRsh64Ux64)
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Div64u x (Const64 [c]))
// cond: umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0
- // result: (Rsh64Ux64 <types.UInt64> (Hmul64u <types.UInt64> (Const64 <types.UInt64> [int64(1<<63+umagic(64,c).m/2)]) x) (Const64 <types.UInt64> [umagic(64,c).s-1]))
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+umagic(64,c).m/2)]) x) (Const64 <typ.UInt64> [umagic(64,c).s-1]))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6883,21 +6885,21 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool {
break
}
v.reset(OpRsh64Ux64)
- v.Type = types.UInt64
- v0 := b.NewValue0(v.Pos, OpHmul64u, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = int64(1<<63 + umagic(64, c).m/2)
v0.AddArg(v1)
v0.AddArg(x)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = umagic(64, c).s - 1
v.AddArg(v2)
return true
}
// match: (Div64u x (Const64 [c]))
// cond: umagicOK(64, c) && config.RegSize == 8 && c&1 == 0
- // result: (Rsh64Ux64 <types.UInt64> (Hmul64u <types.UInt64> (Const64 <types.UInt64> [int64(1<<63+(umagic(64,c).m+1)/2)]) (Rsh64Ux64 <types.UInt64> x (Const64 <types.UInt64> [1]))) (Const64 <types.UInt64> [umagic(64,c).s-2]))
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+(umagic(64,c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic(64,c).s-2]))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6909,26 +6911,26 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool {
break
}
v.reset(OpRsh64Ux64)
- v.Type = types.UInt64
- v0 := b.NewValue0(v.Pos, OpHmul64u, types.UInt64)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = int64(1<<63 + (umagic(64, c).m+1)/2)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpRsh64Ux64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
v2.AddArg(x)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = 1
v2.AddArg(v3)
v0.AddArg(v2)
v.AddArg(v0)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = umagic(64, c).s - 2
v.AddArg(v4)
return true
}
// match: (Div64u x (Const64 [c]))
// cond: umagicOK(64, c) && config.RegSize == 8
- // result: (Rsh64Ux64 <types.UInt64> (Avg64u x (Hmul64u <types.UInt64> (Const64 <types.UInt64> [int64(umagic(64,c).m)]) x)) (Const64 <types.UInt64> [umagic(64,c).s-1]))
+ // result: (Rsh64Ux64 <typ.UInt64> (Avg64u x (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(umagic(64,c).m)]) x)) (Const64 <typ.UInt64> [umagic(64,c).s-1]))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -6940,17 +6942,17 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool {
break
}
v.reset(OpRsh64Ux64)
- v.Type = types.UInt64
- v0 := b.NewValue0(v.Pos, OpAvg64u, types.UInt64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpHmul64u, types.UInt64)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = int64(umagic(64, c).m)
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = umagic(64, c).s - 1
v.AddArg(v3)
return true
@@ -6960,8 +6962,8 @@ func rewriteValuegeneric_OpDiv64u_0(v *Value) bool {
func rewriteValuegeneric_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 (Const8 [c]) (Const8 [d]))
// cond: d != 0
// result: (Const8 [int64(int8(c)/int8(d))])
@@ -7008,7 +7010,7 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool {
}
// match: (Div8 <t> x (Const8 [-1<<7 ]))
// cond:
- // result: (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <types.UInt64> [7 ]))
+ // result: (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
for {
t := v.Type
x := v.Args[0]
@@ -7026,14 +7028,14 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool {
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v2.AuxInt = 7
v.AddArg(v2)
return true
}
// match: (Div8 <t> n (Const8 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <types.UInt64> [ 7])) (Const64 <types.UInt64> [ 8-log2(c)]))) (Const64 <types.UInt64> [log2(c)]))
+ // result: (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [ 8-log2(c)]))) (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
@@ -7051,23 +7053,23 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool {
v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
v2 := b.NewValue0(v.Pos, OpRsh8x64, t)
v2.AddArg(n)
- v3 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v3.AuxInt = 7
v2.AddArg(v3)
v1.AddArg(v2)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 8 - log2(c)
v1.AddArg(v4)
v0.AddArg(v1)
v.AddArg(v0)
- v5 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v5.AuxInt = log2(c)
v.AddArg(v5)
return true
}
// match: (Div8 <t> x (Const8 [c]))
// cond: smagicOK(8,c)
- // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <types.UInt32> (Const32 <types.UInt32> [int64(smagic(8,c).m)]) (SignExt8to32 x)) (Const64 <types.UInt64> [8+smagic(8,c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <types.UInt64> [31])))
+ // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(smagic(8,c).m)]) (SignExt8to32 x)) (Const64 <typ.UInt64> [8+smagic(8,c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <typ.UInt64> [31])))
for {
t := v.Type
x := v.Args[0]
@@ -7082,23 +7084,23 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool {
v.reset(OpSub8)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
- v1 := b.NewValue0(v.Pos, OpMul32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v2.AuxInt = int64(smagic(8, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 8 + smagic(8, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
- v6 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v6 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v6.AddArg(x)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v7.AuxInt = 31
v5.AddArg(v7)
v.AddArg(v5)
@@ -7109,8 +7111,8 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool {
func rewriteValuegeneric_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u (Const8 [c]) (Const8 [d]))
// cond: d != 0
// result: (Const8 [int64(int8(uint8(c)/uint8(d)))])
@@ -7134,7 +7136,7 @@ func rewriteValuegeneric_OpDiv8u_0(v *Value) bool {
}
// match: (Div8u n (Const8 [c]))
// cond: isPowerOfTwo(c&0xff)
- // result: (Rsh8Ux64 n (Const64 <types.UInt64> [log2(c&0xff)]))
+ // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log2(c&0xff)]))
for {
n := v.Args[0]
v_1 := v.Args[1]
@@ -7147,14 +7149,14 @@ func rewriteValuegeneric_OpDiv8u_0(v *Value) bool {
}
v.reset(OpRsh8Ux64)
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c & 0xff)
v.AddArg(v0)
return true
}
// match: (Div8u x (Const8 [c]))
// cond: umagicOK(8, c)
- // result: (Trunc32to8 (Rsh32Ux64 <types.UInt32> (Mul32 <types.UInt32> (Const32 <types.UInt32> [int64(1<<8+umagic(8,c).m)]) (ZeroExt8to32 x)) (Const64 <types.UInt64> [8+umagic(8,c).s])))
+ // result: (Trunc32to8 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int64(1<<8+umagic(8,c).m)]) (ZeroExt8to32 x)) (Const64 <typ.UInt64> [8+umagic(8,c).s])))
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7166,16 +7168,16 @@ func rewriteValuegeneric_OpDiv8u_0(v *Value) bool {
break
}
v.reset(OpTrunc32to8)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux64, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpMul32, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpConst32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
v2.AuxInt = int64(1<<8 + umagic(8, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v4.AuxInt = 8 + umagic(8, c).s
v0.AddArg(v4)
v.AddArg(v0)
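
For 8-bit division no such tricks are needed: the rule above simply widens, because the whole constant 1<<8 + umagic(8,c).m fits easily in a 32-bit multiply, and one shift and truncation finish the job. Sketch, with m and s assumed from umagic(8, c):

func udiv8magic(x uint8, m uint32, s uint) uint8 {
	return uint8(((1<<8 + m) * uint32(x)) >> (8 + s))
}
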
@@ -7983,8 +7985,8 @@ func rewriteValuegeneric_OpEqB_0(v *Value) bool {
func rewriteValuegeneric_OpEqInter_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqInter x y)
// cond:
// result: (EqPtr (ITab x) (ITab y))
@@ -7992,10 +7994,10 @@ func rewriteValuegeneric_OpEqInter_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpEqPtr)
- v0 := b.NewValue0(v.Pos, OpITab, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpITab, typ.BytePtr)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpITab, types.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpITab, typ.BytePtr)
v1.AddArg(y)
v.AddArg(v1)
return true
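
EqInter (and EqSlice below) lowers an aggregate equality to a pointer comparison on the first word; per the generic op definitions, the frontend only emits these ops in cases where that is sufficient, such as comparison against a nil interface or slice. Roughly, under an assumed two-word layout (type names here are hypothetical):

type iface struct{ tab, data uintptr } // itab word, data word

func eqInter(x, y iface) bool {
	return x.tab == y.tab // EqPtr (ITab x) (ITab y)
}
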
@@ -8004,8 +8006,8 @@ func rewriteValuegeneric_OpEqInter_0(v *Value) bool {
func rewriteValuegeneric_OpEqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqPtr p (ConstNil))
// cond:
// result: (Not (IsNonNil p))
@@ -8016,7 +8018,7 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool {
break
}
v.reset(OpNot)
- v0 := b.NewValue0(v.Pos, OpIsNonNil, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
v0.AddArg(p)
v.AddArg(v0)
return true
@@ -8031,7 +8033,7 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool {
}
p := v.Args[1]
v.reset(OpNot)
- v0 := b.NewValue0(v.Pos, OpIsNonNil, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
v0.AddArg(p)
v.AddArg(v0)
return true
@@ -8097,8 +8099,8 @@ func rewriteValuegeneric_OpEqPtr_0(v *Value) bool {
func rewriteValuegeneric_OpEqSlice_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqSlice x y)
// cond:
// result: (EqPtr (SlicePtr x) (SlicePtr y))
@@ -8106,10 +8108,10 @@ func rewriteValuegeneric_OpEqSlice_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpEqPtr)
- v0 := b.NewValue0(v.Pos, OpSlicePtr, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSlicePtr, types.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -9803,7 +9805,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool {
fe := b.Func.fe
_ = fe
// match: (Load <t1> p1 (Store {t2} p2 x _))
- // cond: isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.Size() == t2.(Type).Size()
+ // cond: isSamePtr(p1,p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.(*types.Type).Size()
// result: x
for {
t1 := v.Type
@@ -9815,7 +9817,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool {
t2 := v_1.Aux
p2 := v_1.Args[0]
x := v_1.Args[1]
- if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == CMPeq && t1.Size() == t2.(Type).Size()) {
+ if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.(*types.Type).Size()) {
break
}
v.reset(OpCopy)
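
The Load condition rewritten here is store-to-load forwarding: a load through the same pointer that was just stored to, at a type comparing types.CMPeq with the same size, is replaced by the stored value itself, which is what the OpCopy reset begins. A toy model of the idea (the one-entry "memory" is invented for illustration):

type lastStore struct {
	addr *int32
	val  int32
}

func load(p *int32, m lastStore) int32 {
	if p == m.addr { // isSamePtr plus the type/size check
		return m.val // the Load disappears
	}
	return *p
}
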
@@ -10065,8 +10067,8 @@ func rewriteValuegeneric_OpLsh16x32_0(v *Value) bool {
func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x64 (Const16 [c]) (Const64 [d]))
// cond:
// result: (Const16 [int64(int16(c) << uint64(d))])
@@ -10165,7 +10167,7 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool {
}
// match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Lsh16x64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ // result: (Lsh16x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh16Ux64 {
@@ -10196,7 +10198,7 @@ func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool {
}
v.reset(OpLsh16x64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
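
The shift-combining rule above (repeated below for the other widths) uses an exact identity: for unsigned shifts with c1 >= c2 and c3 >= c2, ((x << c1) >> c2) << c3 == x << (c1-c2+c3), because the middle right-shift only removes zeros introduced by the first left-shift, and the final left-shift clears everything the right-shift exposed. A quick check at one width, with constants chosen to satisfy the side conditions:

func foldedShift(x uint16) (uint16, uint16) {
	const c1, c2, c3 = 8, 4, 4 // c1 >= c2, c3 >= c2, no count overflow
	return ((x << c1) >> c2) << c3, x << (c1 - c2 + c3) // always equal
}
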
@@ -10320,8 +10322,8 @@ func rewriteValuegeneric_OpLsh32x32_0(v *Value) bool {
func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x64 (Const32 [c]) (Const64 [d]))
// cond:
// result: (Const32 [int64(int32(c) << uint64(d))])
@@ -10420,7 +10422,7 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool {
}
// match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Lsh32x64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ // result: (Lsh32x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh32Ux64 {
@@ -10451,7 +10453,7 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool {
}
v.reset(OpLsh32x64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
@@ -10575,8 +10577,8 @@ func rewriteValuegeneric_OpLsh64x32_0(v *Value) bool {
func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh64x64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [c << uint64(d)])
@@ -10675,7 +10677,7 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool {
}
// match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Lsh64x64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ // result: (Lsh64x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh64Ux64 {
@@ -10706,7 +10708,7 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool {
}
v.reset(OpLsh64x64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
@@ -10830,8 +10832,8 @@ func rewriteValuegeneric_OpLsh8x32_0(v *Value) bool {
func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x64 (Const8 [c]) (Const64 [d]))
// cond:
// result: (Const8 [int64(int8(c) << uint64(d))])
@@ -10930,7 +10932,7 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool {
}
// match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Lsh8x64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ // result: (Lsh8x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh8Ux64 {
@@ -10961,7 +10963,7 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool {
}
v.reset(OpLsh8x64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
@@ -11621,8 +11623,8 @@ func rewriteValuegeneric_OpMod8u_0(v *Value) bool {
func rewriteValuegeneric_OpMul16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul16 (Const16 [c]) (Const16 [d]))
// cond:
// result: (Const16 [int64(int16(c*d))])
@@ -11727,7 +11729,7 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool {
}
// match: (Mul16 <t> n (Const16 [c]))
// cond: isPowerOfTwo(c)
- // result: (Lsh16x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Lsh16x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
@@ -11742,14 +11744,14 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool {
v.reset(OpLsh16x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul16 <t> (Const16 [c]) n)
// cond: isPowerOfTwo(c)
- // result: (Lsh16x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Lsh16x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
v_0 := v.Args[0]
@@ -11764,14 +11766,14 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool {
v.reset(OpLsh16x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul16 <t> n (Const16 [c]))
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg16 (Lsh16x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+ // result: (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
for {
t := v.Type
n := v.Args[0]
@@ -11786,7 +11788,7 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool {
v.reset(OpNeg16)
v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
@@ -11794,7 +11796,7 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool {
}
// match: (Mul16 <t> (Const16 [c]) n)
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg16 (Lsh16x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+ // result: (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
for {
t := v.Type
v_0 := v.Args[0]
@@ -11809,7 +11811,7 @@ func rewriteValuegeneric_OpMul16_0(v *Value) bool {
v.reset(OpNeg16)
v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
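
The Mul16 rules above (mirrored below for the 32-, 64- and 8-bit cases) reduce a multiply by ±2^k to a shift, wrapping a Neg around it for negative constants on signed types. A sketch recovering k with math/bits; the function name is illustrative:

// assumes: import "math/bits"
func mul16Pow2(n, c int16) int16 {
	if c > 0 { // isPowerOfTwo(c)
		return n << uint(bits.TrailingZeros16(uint16(c)))
	}
	// t.IsSigned() && isPowerOfTwo(-c)
	return -(n << uint(bits.TrailingZeros16(uint16(-c))))
}
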
@@ -11975,8 +11977,8 @@ func rewriteValuegeneric_OpMul16_10(v *Value) bool {
func rewriteValuegeneric_OpMul32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul32 (Const32 [c]) (Const32 [d]))
// cond:
// result: (Const32 [int64(int32(c*d))])
@@ -12081,7 +12083,7 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool {
}
// match: (Mul32 <t> n (Const32 [c]))
// cond: isPowerOfTwo(c)
- // result: (Lsh32x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Lsh32x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
@@ -12096,14 +12098,14 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool {
v.reset(OpLsh32x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul32 <t> (Const32 [c]) n)
// cond: isPowerOfTwo(c)
- // result: (Lsh32x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Lsh32x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
v_0 := v.Args[0]
@@ -12118,14 +12120,14 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool {
v.reset(OpLsh32x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul32 <t> n (Const32 [c]))
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg32 (Lsh32x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+ // result: (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
for {
t := v.Type
n := v.Args[0]
@@ -12140,7 +12142,7 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool {
v.reset(OpNeg32)
v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
@@ -12148,7 +12150,7 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool {
}
// match: (Mul32 <t> (Const32 [c]) n)
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg32 (Lsh32x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+ // result: (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
for {
t := v.Type
v_0 := v.Args[0]
@@ -12163,7 +12165,7 @@ func rewriteValuegeneric_OpMul32_0(v *Value) bool {
v.reset(OpNeg32)
v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
@@ -12620,8 +12622,8 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
func rewriteValuegeneric_OpMul64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [c*d])
@@ -12726,7 +12728,7 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool {
}
// match: (Mul64 <t> n (Const64 [c]))
// cond: isPowerOfTwo(c)
- // result: (Lsh64x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Lsh64x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
@@ -12741,14 +12743,14 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool {
v.reset(OpLsh64x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul64 <t> (Const64 [c]) n)
// cond: isPowerOfTwo(c)
- // result: (Lsh64x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Lsh64x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
v_0 := v.Args[0]
@@ -12763,14 +12765,14 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool {
v.reset(OpLsh64x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul64 <t> n (Const64 [c]))
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg64 (Lsh64x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+ // result: (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
for {
t := v.Type
n := v.Args[0]
@@ -12785,7 +12787,7 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool {
v.reset(OpNeg64)
v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
@@ -12793,7 +12795,7 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool {
}
// match: (Mul64 <t> (Const64 [c]) n)
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg64 (Lsh64x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+ // result: (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
for {
t := v.Type
v_0 := v.Args[0]
@@ -12808,7 +12810,7 @@ func rewriteValuegeneric_OpMul64_0(v *Value) bool {
v.reset(OpNeg64)
v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
@@ -13265,8 +13267,8 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
func rewriteValuegeneric_OpMul8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mul8 (Const8 [c]) (Const8 [d]))
// cond:
// result: (Const8 [int64(int8(c*d))])
@@ -13371,7 +13373,7 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool {
}
// match: (Mul8 <t> n (Const8 [c]))
// cond: isPowerOfTwo(c)
- // result: (Lsh8x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Lsh8x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
@@ -13386,14 +13388,14 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool {
v.reset(OpLsh8x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul8 <t> (Const8 [c]) n)
// cond: isPowerOfTwo(c)
- // result: (Lsh8x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+ // result: (Lsh8x64 <t> n (Const64 <typ.UInt64> [log2(c)]))
for {
t := v.Type
v_0 := v.Args[0]
@@ -13408,14 +13410,14 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool {
v.reset(OpLsh8x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul8 <t> n (Const8 [c]))
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg8 (Lsh8x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+ // result: (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
for {
t := v.Type
n := v.Args[0]
@@ -13430,7 +13432,7 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool {
v.reset(OpNeg8)
v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
@@ -13438,7 +13440,7 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool {
}
// match: (Mul8 <t> (Const8 [c]) n)
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg8 (Lsh8x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+ // result: (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log2(-c)])))
for {
t := v.Type
v_0 := v.Args[0]
@@ -13453,7 +13455,7 @@ func rewriteValuegeneric_OpMul8_0(v *Value) bool {
v.reset(OpNeg8)
v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
@@ -14616,8 +14618,8 @@ func rewriteValuegeneric_OpNeqB_0(v *Value) bool {
func rewriteValuegeneric_OpNeqInter_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (NeqInter x y)
// cond:
// result: (NeqPtr (ITab x) (ITab y))
@@ -14625,10 +14627,10 @@ func rewriteValuegeneric_OpNeqInter_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpNeqPtr)
- v0 := b.NewValue0(v.Pos, OpITab, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpITab, typ.BytePtr)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpITab, types.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpITab, typ.BytePtr)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -14666,8 +14668,8 @@ func rewriteValuegeneric_OpNeqPtr_0(v *Value) bool {
func rewriteValuegeneric_OpNeqSlice_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (NeqSlice x y)
// cond:
// result: (NeqPtr (SlicePtr x) (SlicePtr y))
@@ -14675,10 +14677,10 @@ func rewriteValuegeneric_OpNeqSlice_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpNeqPtr)
- v0 := b.NewValue0(v.Pos, OpSlicePtr, types.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSlicePtr, types.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -15431,14 +15433,14 @@ func rewriteValuegeneric_OpOffPtr_0(v *Value) bool {
return true
}
// match: (OffPtr p [0])
- // cond: v.Type.Compare(p.Type) == CMPeq
+ // cond: v.Type.Compare(p.Type) == types.CMPeq
// result: p
for {
if v.AuxInt != 0 {
break
}
p := v.Args[0]
- if !(v.Type.Compare(p.Type) == CMPeq) {
+ if !(v.Type.Compare(p.Type) == types.CMPeq) {
break
}
v.reset(OpCopy)
@@ -17716,11 +17718,11 @@ func rewriteValuegeneric_OpPtrIndex_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (PtrIndex <t> ptr idx)
// cond: config.PtrSize == 4
- // result: (AddPtr ptr (Mul32 <types.Int> idx (Const32 <types.Int> [t.ElemType().Size()])))
+ // result: (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [t.ElemType().Size()])))
for {
t := v.Type
ptr := v.Args[0]
@@ -17730,9 +17732,9 @@ func rewriteValuegeneric_OpPtrIndex_0(v *Value) bool {
}
v.reset(OpAddPtr)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMul32, types.Int)
+ v0 := b.NewValue0(v.Pos, OpMul32, typ.Int)
v0.AddArg(idx)
- v1 := b.NewValue0(v.Pos, OpConst32, types.Int)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
v1.AuxInt = t.ElemType().Size()
v0.AddArg(v1)
v.AddArg(v0)
@@ -17740,7 +17742,7 @@ func rewriteValuegeneric_OpPtrIndex_0(v *Value) bool {
}
// match: (PtrIndex <t> ptr idx)
// cond: config.PtrSize == 8
- // result: (AddPtr ptr (Mul64 <types.Int> idx (Const64 <types.Int> [t.ElemType().Size()])))
+ // result: (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.ElemType().Size()])))
for {
t := v.Type
ptr := v.Args[0]
@@ -17750,9 +17752,9 @@ func rewriteValuegeneric_OpPtrIndex_0(v *Value) bool {
}
v.reset(OpAddPtr)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMul64, types.Int)
+ v0 := b.NewValue0(v.Pos, OpMul64, typ.Int)
v0.AddArg(idx)
- v1 := b.NewValue0(v.Pos, OpConst64, types.Int)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
v1.AuxInt = t.ElemType().Size()
v0.AddArg(v1)
v.AddArg(v0)
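
PtrIndex lowers above to plain address arithmetic, base plus index times element size, in the pointer-width integer type: Mul32/Const32 when config.PtrSize is 4, Mul64/Const64 when it is 8. In ordinary terms (a sketch, not something real code should do):

func ptrIndex(base uintptr, i, elemSize int64) uintptr {
	return base + uintptr(i*elemSize) // AddPtr ptr (Mul64 idx (Const64 [size]))
}
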
@@ -17871,8 +17873,8 @@ func rewriteValuegeneric_OpRsh16Ux32_0(v *Value) bool {
func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 (Const16 [c]) (Const64 [d]))
// cond:
// result: (Const16 [int64(int16(uint16(c) >> uint64(d)))])
@@ -17971,7 +17973,7 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool {
}
// match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Rsh16Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ // result: (Rsh16Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x64 {
@@ -18002,14 +18004,14 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool {
}
v.reset(OpRsh16Ux64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
// cond:
- // result: (ZeroExt8to16 (Trunc16to8 <types.UInt8> x))
+ // result: (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x64 {
@@ -18031,7 +18033,7 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool {
break
}
v.reset(OpZeroExt8to16)
- v0 := b.NewValue0(v.Pos, OpTrunc16to8, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.UInt8)
v0.AddArg(x)
v.AddArg(v0)
return true
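
The rule above recognizes a shift-up-then-down pair as an extension: on a uint16, (x << 8) >> 8 keeps exactly the low byte, so it becomes ZeroExt8to16(Trunc16to8 x); the signed Rsh16x64 variant further down becomes the matching sign extension. Equivalently:

func low8(x uint16) uint16 {
	return uint16(uint8(x)) // == (x << 8) >> 8
}
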
@@ -18155,8 +18157,8 @@ func rewriteValuegeneric_OpRsh16x32_0(v *Value) bool {
func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 (Const16 [c]) (Const64 [d]))
// cond:
// result: (Const16 [int64(int16(c) >> uint64(d))])
@@ -18239,7 +18241,7 @@ func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool {
}
// match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
// cond:
- // result: (SignExt8to16 (Trunc16to8 <types.Int8> x))
+ // result: (SignExt8to16 (Trunc16to8 <typ.Int8> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x64 {
@@ -18261,7 +18263,7 @@ func rewriteValuegeneric_OpRsh16x64_0(v *Value) bool {
break
}
v.reset(OpSignExt8to16)
- v0 := b.NewValue0(v.Pos, OpTrunc16to8, types.Int8)
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.Int8)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -18385,8 +18387,8 @@ func rewriteValuegeneric_OpRsh32Ux32_0(v *Value) bool {
func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux64 (Const32 [c]) (Const64 [d]))
// cond:
// result: (Const32 [int64(int32(uint32(c) >> uint64(d)))])
@@ -18485,7 +18487,7 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool {
}
// match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Rsh32Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ // result: (Rsh32Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
@@ -18516,14 +18518,14 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool {
}
v.reset(OpRsh32Ux64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
// cond:
- // result: (ZeroExt8to32 (Trunc32to8 <types.UInt8> x))
+ // result: (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
@@ -18545,14 +18547,14 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool {
break
}
v.reset(OpZeroExt8to32)
- v0 := b.NewValue0(v.Pos, OpTrunc32to8, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.UInt8)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
// cond:
- // result: (ZeroExt16to32 (Trunc32to16 <types.UInt16> x))
+ // result: (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
@@ -18574,7 +18576,7 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool {
break
}
v.reset(OpZeroExt16to32)
- v0 := b.NewValue0(v.Pos, OpTrunc32to16, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.UInt16)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -18698,8 +18700,8 @@ func rewriteValuegeneric_OpRsh32x32_0(v *Value) bool {
func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x64 (Const32 [c]) (Const64 [d]))
// cond:
// result: (Const32 [int64(int32(c) >> uint64(d))])
@@ -18782,7 +18784,7 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool {
}
// match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
// cond:
- // result: (SignExt8to32 (Trunc32to8 <types.Int8> x))
+ // result: (SignExt8to32 (Trunc32to8 <typ.Int8> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
@@ -18804,14 +18806,14 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool {
break
}
v.reset(OpSignExt8to32)
- v0 := b.NewValue0(v.Pos, OpTrunc32to8, types.Int8)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.Int8)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
// cond:
- // result: (SignExt16to32 (Trunc32to16 <types.Int16> x))
+ // result: (SignExt16to32 (Trunc32to16 <typ.Int16> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
@@ -18833,7 +18835,7 @@ func rewriteValuegeneric_OpRsh32x64_0(v *Value) bool {
break
}
v.reset(OpSignExt16to32)
- v0 := b.NewValue0(v.Pos, OpTrunc32to16, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.Int16)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -18957,8 +18959,8 @@ func rewriteValuegeneric_OpRsh64Ux32_0(v *Value) bool {
func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [int64(uint64(c) >> uint64(d))])
@@ -19057,7 +19059,7 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool {
}
// match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Rsh64Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ // result: (Rsh64Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
@@ -19088,14 +19090,14 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool {
}
v.reset(OpRsh64Ux64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
// cond:
- // result: (ZeroExt8to64 (Trunc64to8 <types.UInt8> x))
+ // result: (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
@@ -19117,14 +19119,14 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool {
break
}
v.reset(OpZeroExt8to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to8, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.UInt8)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
// cond:
- // result: (ZeroExt16to64 (Trunc64to16 <types.UInt16> x))
+ // result: (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
@@ -19146,14 +19148,14 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool {
break
}
v.reset(OpZeroExt16to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to16, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.UInt16)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
// cond:
- // result: (ZeroExt32to64 (Trunc64to32 <types.UInt32> x))
+ // result: (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
@@ -19175,7 +19177,7 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool {
break
}
v.reset(OpZeroExt32to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -19299,8 +19301,8 @@ func rewriteValuegeneric_OpRsh64x32_0(v *Value) bool {
func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [c >> uint64(d)])
@@ -19383,7 +19385,7 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool {
}
// match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
// cond:
- // result: (SignExt8to64 (Trunc64to8 <types.Int8> x))
+ // result: (SignExt8to64 (Trunc64to8 <typ.Int8> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
@@ -19405,14 +19407,14 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool {
break
}
v.reset(OpSignExt8to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to8, types.Int8)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.Int8)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
// cond:
- // result: (SignExt16to64 (Trunc64to16 <types.Int16> x))
+ // result: (SignExt16to64 (Trunc64to16 <typ.Int16> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
@@ -19434,14 +19436,14 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool {
break
}
v.reset(OpSignExt16to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to16, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.Int16)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
// cond:
- // result: (SignExt32to64 (Trunc64to32 <types.Int32> x))
+ // result: (SignExt32to64 (Trunc64to32 <typ.Int32> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
@@ -19463,7 +19465,7 @@ func rewriteValuegeneric_OpRsh64x64_0(v *Value) bool {
break
}
v.reset(OpSignExt32to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -19587,8 +19589,8 @@ func rewriteValuegeneric_OpRsh8Ux32_0(v *Value) bool {
func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 (Const8 [c]) (Const64 [d]))
// cond:
// result: (Const8 [int64(int8(uint8(c) >> uint64(d)))])
@@ -19687,7 +19689,7 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool {
}
// match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Rsh8Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
+ // result: (Rsh8Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh8x64 {
@@ -19718,7 +19720,7 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool {
}
v.reset(OpRsh8Ux64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
@@ -20505,7 +20507,7 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool {
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(f1)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t.FieldType(0)
v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
v2.AuxInt = 0
@@ -20537,14 +20539,14 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool {
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(f2)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t.FieldType(1)
v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
v2.AuxInt = t.FieldOff(1)
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(f1)
- v3 := b.NewValue0(v.Pos, OpStore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v3.Aux = t.FieldType(0)
v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
v4.AuxInt = 0
@@ -20578,21 +20580,21 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool {
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(f3)
- v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t.FieldType(2)
v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
v2.AuxInt = t.FieldOff(2)
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(f2)
- v3 := b.NewValue0(v.Pos, OpStore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v3.Aux = t.FieldType(1)
v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
v4.AuxInt = t.FieldOff(1)
v4.AddArg(dst)
v3.AddArg(v4)
v3.AddArg(f1)
- v5 := b.NewValue0(v.Pos, OpStore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v5.Aux = t.FieldType(0)
v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
v6.AuxInt = 0
@@ -20606,8 +20608,8 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} dst (Load src mem) mem)
- // cond: !fe.CanSSA(t.(Type))
- // result: (Move {t} [t.(Type).Size()] dst src mem)
+ // cond: !fe.CanSSA(t.(*types.Type))
+ // result: (Move {t} [t.(*types.Type).Size()] dst src mem)
for {
t := v.Aux
dst := v.Args[0]
@@ -20620,11 +20622,11 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(!fe.CanSSA(t.(Type))) {
+ if !(!fe.CanSSA(t.(*types.Type))) {
break
}
v.reset(OpMove)
- v.AuxInt = t.(Type).Size()
+ v.AuxInt = t.(*types.Type).Size()
v.Aux = t
v.AddArg(dst)
v.AddArg(src)
@@ -20632,8 +20634,8 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} dst (Load src mem) (VarDef {x} mem))
- // cond: !fe.CanSSA(t.(Type))
- // result: (Move {t} [t.(Type).Size()] dst src (VarDef {x} mem))
+ // cond: !fe.CanSSA(t.(*types.Type))
+ // result: (Move {t} [t.(*types.Type).Size()] dst src (VarDef {x} mem))
for {
t := v.Aux
dst := v.Args[0]
@@ -20651,15 +20653,15 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool {
if mem != v_2.Args[0] {
break
}
- if !(!fe.CanSSA(t.(Type))) {
+ if !(!fe.CanSSA(t.(*types.Type))) {
break
}
v.reset(OpMove)
- v.AuxInt = t.(Type).Size()
+ v.AuxInt = t.(*types.Type).Size()
v.Aux = t
v.AddArg(dst)
v.AddArg(src)
- v0 := b.NewValue0(v.Pos, OpVarDef, TypeMem)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
v0.Aux = x
v0.AddArg(mem)
v.AddArg(v0)
@@ -24548,8 +24550,8 @@ func rewriteBlockgeneric(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockIf:
// match: (If (Not cond) yes no)
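The pattern above recurs throughout the generated rewrite files: the rule bodies used to bind a local named types to the handy type list in ssa.Config, and that local would now shadow the cmd/compile/internal/types package, so it is renamed typ. A minimal sketch of the resulting idiom, with illustrative names (this is not the generated code itself):

    package ssa

    import "cmd/compile/internal/types"

    func exampleRule(b *Block, v *Value) {
        typ := &b.Func.Config.Types // was "types"; renamed so the package name stays visible
        // Go types come from the config's type list...
        v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
        // ...while SSA-only pseudo-types are package-level singletons in types.
        v1 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
        _, _ = v0, v1
    }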
diff --git a/src/cmd/compile/internal/ssa/schedule_test.go b/src/cmd/compile/internal/ssa/schedule_test.go
index eceaafc088..f7177dd704 100644
--- a/src/cmd/compile/internal/ssa/schedule_test.go
+++ b/src/cmd/compile/internal/ssa/schedule_test.go
@@ -4,22 +4,25 @@
package ssa
-import "testing"
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
func TestSchedule(t *testing.T) {
c := testConfig(t)
cases := []fun{
c.Fun("entry",
Bloc("entry",
- Valu("mem0", OpInitMem, TypeMem, 0, nil),
- Valu("ptr", OpConst64, TypeInt64, 0xABCD, nil),
- Valu("v", OpConst64, TypeInt64, 12, nil),
- Valu("mem1", OpStore, TypeMem, 0, TypeInt64, "ptr", "v", "mem0"),
- Valu("mem2", OpStore, TypeMem, 0, TypeInt64, "ptr", "v", "mem1"),
- Valu("mem3", OpStore, TypeMem, 0, TypeInt64, "ptr", "sum", "mem2"),
- Valu("l1", OpLoad, TypeInt64, 0, nil, "ptr", "mem1"),
- Valu("l2", OpLoad, TypeInt64, 0, nil, "ptr", "mem2"),
- Valu("sum", OpAdd64, TypeInt64, 0, nil, "l1", "l2"),
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil),
+ Valu("v", OpConst64, c.config.Types.Int64, 12, nil),
+ Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"),
+ Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem1"),
+ Valu("mem3", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "sum", "mem2"),
+ Valu("l1", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"),
+ Valu("l2", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem2"),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "l1", "l2"),
Goto("exit")),
Bloc("exit",
Exit("mem3"))),
@@ -62,14 +65,14 @@ func TestStoreOrder(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem0", OpInitMem, TypeMem, 0, nil),
- Valu("a", OpAdd64, TypeInt64, 0, nil, "b", "c"), // v2
- Valu("b", OpLoad, TypeInt64, 0, nil, "ptr", "mem1"), // v3
- Valu("c", OpNeg64, TypeInt64, 0, nil, "b"), // v4
- Valu("mem1", OpStore, TypeMem, 0, TypeInt64, "ptr", "v", "mem0"), // v5
- Valu("mem2", OpStore, TypeMem, 0, TypeInt64, "ptr", "a", "mem1"),
- Valu("ptr", OpConst64, TypeInt64, 0xABCD, nil),
- Valu("v", OpConst64, TypeInt64, 12, nil),
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpAdd64, c.config.Types.Int64, 0, nil, "b", "c"), // v2
+ Valu("b", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"), // v3
+ Valu("c", OpNeg64, c.config.Types.Int64, 0, nil, "b"), // v4
+ Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"), // v5
+ Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "a", "mem1"),
+ Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil),
+ Valu("v", OpConst64, c.config.Types.Int64, 12, nil),
Goto("exit")),
Bloc("exit",
Exit("mem2")))
diff --git a/src/cmd/compile/internal/ssa/shift_test.go b/src/cmd/compile/internal/ssa/shift_test.go
index 1c39f50389..ffb5a5947e 100644
--- a/src/cmd/compile/internal/ssa/shift_test.go
+++ b/src/cmd/compile/internal/ssa/shift_test.go
@@ -5,48 +5,50 @@
package ssa
import (
+ "cmd/compile/internal/types"
"testing"
)
func TestShiftConstAMD64(t *testing.T) {
c := testConfig(t)
- fun := makeConstShiftFunc(c, 18, OpLsh64x64, TypeUInt64)
+ fun := makeConstShiftFunc(c, 18, OpLsh64x64, c.config.Types.UInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
- fun = makeConstShiftFunc(c, 66, OpLsh64x64, TypeUInt64)
+ fun = makeConstShiftFunc(c, 66, OpLsh64x64, c.config.Types.UInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
- fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, TypeUInt64)
+ fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, c.config.Types.UInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
- fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, TypeUInt64)
+ fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, c.config.Types.UInt64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
- fun = makeConstShiftFunc(c, 18, OpRsh64x64, TypeInt64)
+ fun = makeConstShiftFunc(c, 18, OpRsh64x64, c.config.Types.Int64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
- fun = makeConstShiftFunc(c, 66, OpRsh64x64, TypeInt64)
+ fun = makeConstShiftFunc(c, 66, OpRsh64x64, c.config.Types.Int64)
checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
}
-func makeConstShiftFunc(c *Conf, amount int64, op Op, typ Type) fun {
- ptyp := &TypeImpl{Size_: 8, Ptr: true, Name: "ptr"}
+func makeConstShiftFunc(c *Conf, amount int64, op Op, typ *types.Type) fun {
+ ptyp := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("SP", OpSP, TypeUInt64, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.UInt64, 0, nil),
Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
- Valu("c", OpConst64, TypeUInt64, amount, nil),
+ Valu("c", OpConst64, c.config.Types.UInt64, amount, nil),
Valu("shift", op, typ, 0, nil, "load", "c"),
- Valu("store", OpStore, TypeMem, 0, TypeUInt64, "resptr", "shift", "mem"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "shift", "mem"),
Exit("store")))
Compile(fun.f)
return fun
}
func TestShiftToExtensionAMD64(t *testing.T) {
+ c := testConfig(t)
// Test that eligible pairs of constant shifts are converted to extensions.
// For example:
// (uint64(x) << 32) >> 32 -> uint64(uint32(x))
@@ -58,24 +60,23 @@ func TestShiftToExtensionAMD64(t *testing.T) {
tests := [...]struct {
amount int64
left, right Op
- typ Type
+ typ *types.Type
}{
// unsigned
- {56, OpLsh64x64, OpRsh64Ux64, TypeUInt64},
- {48, OpLsh64x64, OpRsh64Ux64, TypeUInt64},
- {32, OpLsh64x64, OpRsh64Ux64, TypeUInt64},
- {24, OpLsh32x64, OpRsh32Ux64, TypeUInt32},
- {16, OpLsh32x64, OpRsh32Ux64, TypeUInt32},
- {8, OpLsh16x64, OpRsh16Ux64, TypeUInt16},
+ {56, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {48, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {32, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {24, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32},
+ {16, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32},
+ {8, OpLsh16x64, OpRsh16Ux64, c.config.Types.UInt16},
// signed
- {56, OpLsh64x64, OpRsh64x64, TypeInt64},
- {48, OpLsh64x64, OpRsh64x64, TypeInt64},
- {32, OpLsh64x64, OpRsh64x64, TypeInt64},
- {24, OpLsh32x64, OpRsh32x64, TypeInt32},
- {16, OpLsh32x64, OpRsh32x64, TypeInt32},
- {8, OpLsh16x64, OpRsh16x64, TypeInt16},
+ {56, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {48, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {32, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {24, OpLsh32x64, OpRsh32x64, c.config.Types.Int32},
+ {16, OpLsh32x64, OpRsh32x64, c.config.Types.Int32},
+ {8, OpLsh16x64, OpRsh16x64, c.config.Types.Int16},
}
- c := testConfig(t)
for _, tc := range tests {
fun := makeShiftExtensionFunc(c, tc.amount, tc.left, tc.right, tc.typ)
checkOpcodeCounts(t, fun.f, ops)
@@ -87,19 +88,19 @@ func TestShiftToExtensionAMD64(t *testing.T) {
// (rshift (lshift (Const64 [amount])) (Const64 [amount]))
//
// This may be equivalent to a sign or zero extension.
-func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ Type) fun {
- ptyp := &TypeImpl{Size_: 8, Ptr: true, Name: "ptr"}
+func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ *types.Type) fun {
+ ptyp := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("SP", OpSP, TypeUInt64, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.UInt64, 0, nil),
Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
- Valu("c", OpConst64, TypeUInt64, amount, nil),
+ Valu("c", OpConst64, c.config.Types.UInt64, amount, nil),
Valu("lshift", lshift, typ, 0, nil, "load", "c"),
Valu("rshift", rshift, typ, 0, nil, "lshift", "c"),
- Valu("store", OpStore, TypeMem, 0, TypeUInt64, "resptr", "rshift", "mem"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "rshift", "mem"),
Exit("store")))
Compile(fun.f)
return fun
diff --git a/src/cmd/compile/internal/ssa/shortcircuit_test.go b/src/cmd/compile/internal/ssa/shortcircuit_test.go
index e70159d746..b25eeb4740 100644
--- a/src/cmd/compile/internal/ssa/shortcircuit_test.go
+++ b/src/cmd/compile/internal/ssa/shortcircuit_test.go
@@ -4,32 +4,35 @@
package ssa
-import "testing"
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
func TestShortCircuit(t *testing.T) {
c := testConfig(t)
fun := c.Fun("entry",
Bloc("entry",
- Valu("mem", OpInitMem, TypeMem, 0, nil),
- Valu("arg1", OpArg, TypeInt64, 0, nil),
- Valu("arg2", OpArg, TypeInt64, 0, nil),
- Valu("arg3", OpArg, TypeInt64, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("arg1", OpArg, c.config.Types.Int64, 0, nil),
+ Valu("arg2", OpArg, c.config.Types.Int64, 0, nil),
+ Valu("arg3", OpArg, c.config.Types.Int64, 0, nil),
Goto("b1")),
Bloc("b1",
- Valu("cmp1", OpLess64, TypeBool, 0, nil, "arg1", "arg2"),
+ Valu("cmp1", OpLess64, c.config.Types.Bool, 0, nil, "arg1", "arg2"),
If("cmp1", "b2", "b3")),
Bloc("b2",
- Valu("cmp2", OpLess64, TypeBool, 0, nil, "arg2", "arg3"),
+ Valu("cmp2", OpLess64, c.config.Types.Bool, 0, nil, "arg2", "arg3"),
Goto("b3")),
Bloc("b3",
- Valu("phi2", OpPhi, TypeBool, 0, nil, "cmp1", "cmp2"),
+ Valu("phi2", OpPhi, c.config.Types.Bool, 0, nil, "cmp1", "cmp2"),
If("phi2", "b4", "b5")),
Bloc("b4",
- Valu("cmp3", OpLess64, TypeBool, 0, nil, "arg3", "arg1"),
+ Valu("cmp3", OpLess64, c.config.Types.Bool, 0, nil, "arg3", "arg1"),
Goto("b5")),
Bloc("b5",
- Valu("phi3", OpPhi, TypeBool, 0, nil, "phi2", "cmp3"),
+ Valu("phi3", OpPhi, c.config.Types.Bool, 0, nil, "phi2", "cmp3"),
If("phi3", "b6", "b7")),
Bloc("b6",
Exit("mem")),
diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go
index d49c70a97b..fa68209534 100644
--- a/src/cmd/compile/internal/ssa/sizeof_test.go
+++ b/src/cmd/compile/internal/ssa/sizeof_test.go
@@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Value{}, 72, 120},
+ {Value{}, 68, 112},
{Block{}, 152, 288},
}
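The new sizes follow directly from the Type field change in value.go below: an interface value occupies two words (type word plus data word), while *types.Type is a single pointer, so Value loses one word per platform: 120-8=112 on 64-bit and 72-4=68 on 32-bit. A small standalone illustration of that accounting (not compiler code):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var i interface{} // two words: dynamic type + data
        var p *int        // one word, like *types.Type
        fmt.Println(unsafe.Sizeof(i) - unsafe.Sizeof(p)) // 8 on 64-bit, 4 on 32-bit
    }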
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 8caf50494e..3b44986eee 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -7,6 +7,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
)
@@ -68,7 +69,7 @@ func putStackAllocState(s *stackAllocState) {
}
type stackValState struct {
- typ Type
+ typ *types.Type
spill *Value
needSlot bool
}
@@ -162,7 +163,7 @@ func (s *stackAllocState) stackalloc() {
// TODO: share slots among equivalent types. We would need to
// only share among types with the same GC signature. See the
// type.Equal calls below for where this matters.
- locations := map[Type][]LocalSlot{}
+ locations := map[*types.Type][]LocalSlot{}
// Each time we assign a stack slot to a value v, we remember
// the slot we used via an index into locations[v.Type].
@@ -204,7 +205,7 @@ func (s *stackAllocState) stackalloc() {
} else {
name = names[v.ID]
}
- if name.N != nil && v.Type.Compare(name.Type) == CMPeq {
+ if name.N != nil && v.Type.Compare(name.Type) == types.CMPeq {
for _, id := range s.interfere[v.ID] {
h := f.getHome(id)
if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
@@ -376,7 +377,7 @@ func (s *stackAllocState) buildInterferenceGraph() {
if s.values[v.ID].needSlot {
live.remove(v.ID)
for _, id := range live.contents() {
- if s.values[v.ID].typ.Compare(s.values[id].typ) == CMPeq {
+ if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq {
s.interfere[v.ID] = append(s.interfere[v.ID], id)
s.interfere[id] = append(s.interfere[id], v.ID)
}
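Both comparisons above now use the CMP* constants from package types (added in the types/type.go hunks below). A minimal sketch of the idiom under that assumption:

    package ssa

    import "cmd/compile/internal/types"

    // sameSlotType reports whether two values could share a stack slot,
    // using the optimizer-centric type comparison.
    func sameSlotType(a, b *types.Type) bool {
        return a.Compare(b) == types.CMPeq
    }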
diff --git a/src/cmd/compile/internal/ssa/type.go b/src/cmd/compile/internal/ssa/type.go
deleted file mode 100644
index 0936cc5184..0000000000
--- a/src/cmd/compile/internal/ssa/type.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "cmd/internal/obj"
-
-// TODO: use go/types instead?
-
-// A type interface used to import cmd/internal/gc:Type
-// Type instances are not guaranteed to be canonical.
-type Type interface {
- Size() int64 // return the size in bytes
- Alignment() int64
-
- IsBoolean() bool // is a named or unnamed boolean type
- IsInteger() bool // ... ditto for the others
- IsSigned() bool
- IsFloat() bool
- IsComplex() bool
- IsPtrShaped() bool
- IsString() bool
- IsSlice() bool
- IsArray() bool
- IsStruct() bool
- IsInterface() bool
-
- IsMemory() bool // special ssa-package-only types
- IsFlags() bool
- IsVoid() bool
- IsTuple() bool
-
- ElemType() Type // given []T or *T or [n]T, return T
- PtrTo() Type // given T, return *T
-
- NumFields() int // # of fields of a struct
- FieldType(i int) Type // type of ith field of the struct or ith part of a tuple
- FieldOff(i int) int64 // offset of ith field of the struct
- FieldName(i int) string // name of ith field of the struct
-
- NumElem() int64 // # of elements of an array
-
- HasPointer() bool // has heap pointer
-
- String() string
- SimpleString() string // a coarser generic description of T, e.g. T's underlying type
- Compare(Type) Cmp // compare types, returning one of CMPlt, CMPeq, CMPgt.
- Symbol() *obj.LSym // the symbol of the type
-}
-
-// Special compiler-only types.
-type CompilerType struct {
- Name string
- size int64
- Memory bool
- Flags bool
- Void bool
- Int128 bool
-}
-
-func (t *CompilerType) Size() int64 { return t.size } // Size in bytes
-func (t *CompilerType) Alignment() int64 { return 0 }
-func (t *CompilerType) IsBoolean() bool { return false }
-func (t *CompilerType) IsInteger() bool { return false }
-func (t *CompilerType) IsSigned() bool { return false }
-func (t *CompilerType) IsFloat() bool { return false }
-func (t *CompilerType) IsComplex() bool { return false }
-func (t *CompilerType) IsPtrShaped() bool { return false }
-func (t *CompilerType) IsString() bool { return false }
-func (t *CompilerType) IsSlice() bool { return false }
-func (t *CompilerType) IsArray() bool { return false }
-func (t *CompilerType) IsStruct() bool { return false }
-func (t *CompilerType) IsInterface() bool { return false }
-func (t *CompilerType) IsMemory() bool { return t.Memory }
-func (t *CompilerType) IsFlags() bool { return t.Flags }
-func (t *CompilerType) IsVoid() bool { return t.Void }
-func (t *CompilerType) IsTuple() bool { return false }
-func (t *CompilerType) String() string { return t.Name }
-func (t *CompilerType) SimpleString() string { return t.Name }
-func (t *CompilerType) ElemType() Type { panic("not implemented") }
-func (t *CompilerType) PtrTo() Type { panic("not implemented") }
-func (t *CompilerType) NumFields() int { panic("not implemented") }
-func (t *CompilerType) FieldType(i int) Type { panic("not implemented") }
-func (t *CompilerType) FieldOff(i int) int64 { panic("not implemented") }
-func (t *CompilerType) FieldName(i int) string { panic("not implemented") }
-func (t *CompilerType) NumElem() int64 { panic("not implemented") }
-func (t *CompilerType) HasPointer() bool { panic("not implemented") }
-func (t *CompilerType) Symbol() *obj.LSym { panic("not implemented") }
-
-type TupleType struct {
- first Type
- second Type
- // Any tuple with a memory type must put that memory type second.
-}
-
-func (t *TupleType) Size() int64 { panic("not implemented") }
-func (t *TupleType) Alignment() int64 { panic("not implemented") }
-func (t *TupleType) IsBoolean() bool { return false }
-func (t *TupleType) IsInteger() bool { return false }
-func (t *TupleType) IsSigned() bool { return false }
-func (t *TupleType) IsFloat() bool { return false }
-func (t *TupleType) IsComplex() bool { return false }
-func (t *TupleType) IsPtrShaped() bool { return false }
-func (t *TupleType) IsString() bool { return false }
-func (t *TupleType) IsSlice() bool { return false }
-func (t *TupleType) IsArray() bool { return false }
-func (t *TupleType) IsStruct() bool { return false }
-func (t *TupleType) IsInterface() bool { return false }
-func (t *TupleType) IsMemory() bool { return false }
-func (t *TupleType) IsFlags() bool { return false }
-func (t *TupleType) IsVoid() bool { return false }
-func (t *TupleType) IsTuple() bool { return true }
-func (t *TupleType) String() string { return t.first.String() + "," + t.second.String() }
-func (t *TupleType) SimpleString() string { return "Tuple" }
-func (t *TupleType) ElemType() Type { panic("not implemented") }
-func (t *TupleType) PtrTo() Type { panic("not implemented") }
-func (t *TupleType) NumFields() int { panic("not implemented") }
-func (t *TupleType) FieldType(i int) Type {
- switch i {
- case 0:
- return t.first
- case 1:
- return t.second
- default:
- panic("bad tuple index")
- }
-}
-func (t *TupleType) FieldOff(i int) int64 { panic("not implemented") }
-func (t *TupleType) FieldName(i int) string { panic("not implemented") }
-func (t *TupleType) NumElem() int64 { panic("not implemented") }
-func (t *TupleType) HasPointer() bool { panic("not implemented") }
-func (t *TupleType) Symbol() *obj.LSym { panic("not implemented") }
-
-// Cmp is a comparison between values a and b.
-// -1 if a < b
-// 0 if a == b
-// 1 if a > b
-type Cmp int8
-
-const (
- CMPlt = Cmp(-1)
- CMPeq = Cmp(0)
- CMPgt = Cmp(1)
-)
-
-func (t *CompilerType) Compare(u Type) Cmp {
- x, ok := u.(*CompilerType)
- // ssa.CompilerType is smaller than any other type
- if !ok {
- return CMPlt
- }
- if t == x {
- return CMPeq
- }
- // desire fast sorting, not pretty sorting.
- if len(t.Name) == len(x.Name) {
- if t.Name == x.Name {
- return CMPeq
- }
- if t.Name < x.Name {
- return CMPlt
- }
- return CMPgt
- }
- if len(t.Name) > len(x.Name) {
- return CMPgt
- }
- return CMPlt
-}
-
-func (t *TupleType) Compare(u Type) Cmp {
- // ssa.TupleType is greater than ssa.CompilerType
- if _, ok := u.(*CompilerType); ok {
- return CMPgt
- }
- // ssa.TupleType is smaller than any other type
- x, ok := u.(*TupleType)
- if !ok {
- return CMPlt
- }
- if t == x {
- return CMPeq
- }
- if c := t.first.Compare(x.first); c != CMPeq {
- return c
- }
- return t.second.Compare(x.second)
-}
-
-var (
- TypeInvalid = &CompilerType{Name: "invalid"}
- TypeMem = &CompilerType{Name: "mem", Memory: true}
- TypeFlags = &CompilerType{Name: "flags", Flags: true}
- TypeVoid = &CompilerType{Name: "void", Void: true}
- TypeInt128 = &CompilerType{Name: "int128", size: 16, Int128: true}
-)
-
-func MakeTuple(t0, t1 Type) *TupleType {
- return &TupleType{first: t0, second: t1}
-}
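Everything this file provided survives in package types: the CompilerType values become the TypeInvalid/TypeMem/TypeFlags/TypeVoid/TypeInt128 singletons of kind TSSA, and MakeTuple becomes types.NewTuple building a TTUPLE (see the types/type.go hunks below). A sketch of the replacement API, illustrative only and assuming the types.Types basic-type array:

    package ssa

    import "cmd/compile/internal/types"

    func tupleSketch() *types.Type {
        // was: ssa.MakeTuple(t0, t1)
        tup := types.NewTuple(types.Types[types.TINT64], types.TypeMem)
        _ = tup.IsTuple()    // true: Etype == TTUPLE
        _ = tup.FieldType(1) // second element; a tuple's memory type must come second
        return tup
    }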
diff --git a/src/cmd/compile/internal/ssa/type_test.go b/src/cmd/compile/internal/ssa/type_test.go
deleted file mode 100644
index 90958995ce..0000000000
--- a/src/cmd/compile/internal/ssa/type_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "cmd/internal/obj"
-
-// Stub implementation used for testing.
-type TypeImpl struct {
- Size_ int64
- Align int64
- Boolean bool
- Integer bool
- Signed bool
- Float bool
- Complex bool
- Ptr bool
- string bool
- slice bool
- array bool
- struct_ bool
- inter bool
- Elem_ Type
-
- Name string
-}
-
-func (t *TypeImpl) Size() int64 { return t.Size_ }
-func (t *TypeImpl) Alignment() int64 { return t.Align }
-func (t *TypeImpl) IsBoolean() bool { return t.Boolean }
-func (t *TypeImpl) IsInteger() bool { return t.Integer }
-func (t *TypeImpl) IsSigned() bool { return t.Signed }
-func (t *TypeImpl) IsFloat() bool { return t.Float }
-func (t *TypeImpl) IsComplex() bool { return t.Complex }
-func (t *TypeImpl) IsPtrShaped() bool { return t.Ptr }
-func (t *TypeImpl) IsString() bool { return t.string }
-func (t *TypeImpl) IsSlice() bool { return t.slice }
-func (t *TypeImpl) IsArray() bool { return t.array }
-func (t *TypeImpl) IsStruct() bool { return t.struct_ }
-func (t *TypeImpl) IsInterface() bool { return t.inter }
-func (t *TypeImpl) IsMemory() bool { return false }
-func (t *TypeImpl) IsFlags() bool { return false }
-func (t *TypeImpl) IsTuple() bool { return false }
-func (t *TypeImpl) IsVoid() bool { return false }
-func (t *TypeImpl) String() string { return t.Name }
-func (t *TypeImpl) SimpleString() string { return t.Name }
-func (t *TypeImpl) ElemType() Type { return t.Elem_ }
-func (t *TypeImpl) PtrTo() Type { return TypeBytePtr }
-func (t *TypeImpl) NumFields() int { panic("not implemented") }
-func (t *TypeImpl) FieldType(i int) Type { panic("not implemented") }
-func (t *TypeImpl) FieldOff(i int) int64 { panic("not implemented") }
-func (t *TypeImpl) FieldName(i int) string { panic("not implemented") }
-func (t *TypeImpl) NumElem() int64 { panic("not implemented") }
-func (t *TypeImpl) HasPointer() bool { return t.Ptr }
-func (t *TypeImpl) Symbol() *obj.LSym { panic("not implemented") }
-
-func (t *TypeImpl) Equal(u Type) bool {
- x, ok := u.(*TypeImpl)
- if !ok {
- return false
- }
- return x == t
-}
-
-func (t *TypeImpl) Compare(u Type) Cmp {
- x, ok := u.(*TypeImpl)
- // ssa.CompilerType < ssa.TypeImpl < gc.Type
- if !ok {
- _, ok := u.(*CompilerType)
- if ok {
- return CMPgt
- }
- return CMPlt
- }
- if t == x {
- return CMPeq
- }
- if t.Name < x.Name {
- return CMPlt
- }
- if t.Name > x.Name {
- return CMPgt
- }
- return CMPeq
-
-}
-
-var (
- // shortcuts for commonly used basic types
- TypeInt8 = &TypeImpl{Size_: 1, Align: 1, Integer: true, Signed: true, Name: "int8"}
- TypeInt16 = &TypeImpl{Size_: 2, Align: 2, Integer: true, Signed: true, Name: "int16"}
- TypeInt32 = &TypeImpl{Size_: 4, Align: 4, Integer: true, Signed: true, Name: "int32"}
- TypeInt64 = &TypeImpl{Size_: 8, Align: 8, Integer: true, Signed: true, Name: "int64"}
- TypeFloat32 = &TypeImpl{Size_: 4, Align: 4, Float: true, Name: "float32"}
- TypeFloat64 = &TypeImpl{Size_: 8, Align: 8, Float: true, Name: "float64"}
- TypeComplex64 = &TypeImpl{Size_: 8, Align: 4, Complex: true, Name: "complex64"}
- TypeComplex128 = &TypeImpl{Size_: 16, Align: 8, Complex: true, Name: "complex128"}
- TypeUInt8 = &TypeImpl{Size_: 1, Align: 1, Integer: true, Name: "uint8"}
- TypeUInt16 = &TypeImpl{Size_: 2, Align: 2, Integer: true, Name: "uint16"}
- TypeUInt32 = &TypeImpl{Size_: 4, Align: 4, Integer: true, Name: "uint32"}
- TypeUInt64 = &TypeImpl{Size_: 8, Align: 8, Integer: true, Name: "uint64"}
- TypeBool = &TypeImpl{Size_: 1, Align: 1, Boolean: true, Name: "bool"}
- TypeBytePtr = &TypeImpl{Size_: 8, Align: 8, Ptr: true, Name: "*byte"}
- TypeInt64Ptr = &TypeImpl{Size_: 8, Align: 8, Ptr: true, Name: "*int64"}
-)
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index 992561c674..84634484ce 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
@@ -24,7 +25,7 @@ type Value struct {
// The type of this value. Normally this will be a Go type, but there
// are a few other pseudo-types, see type.go.
- Type Type
+ Type *types.Type
// Auxiliary info for this value. The type of this information depends on the opcode and type.
// AuxInt is used for integer values, Aux is used for other values.
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index af397bed71..7aec598f6f 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
)
@@ -12,7 +13,7 @@ import (
// needwb returns whether we need a write barrier for store op v.
// v must be Store/Move/Zero.
func needwb(v *Value) bool {
- t, ok := v.Aux.(Type)
+ t, ok := v.Aux.(*types.Type)
if !ok {
v.Fatalf("store aux is not a type: %s", v.LongString())
}
@@ -155,9 +156,9 @@ func writebarrier(f *Func) {
// set up control flow for write barrier test
// load word, test word, avoiding partial register write from load byte.
- types := &f.Config.Types
- flag := b.NewValue2(pos, OpLoad, types.UInt32, wbaddr, mem)
- flag = b.NewValue2(pos, OpNeq32, types.Bool, flag, const0)
+ cfgtypes := &f.Config.Types
+ flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
+ flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
b.Kind = BlockIf
b.SetControl(flag)
b.Likely = BranchUnlikely
@@ -185,10 +186,10 @@ func writebarrier(f *Func) {
case OpMoveWB:
fn = typedmemmove
val = w.Args[1]
- typ = &ExternSymbol{Sym: w.Aux.(Type).Symbol()}
+ typ = &ExternSymbol{Sym: w.Aux.(*types.Type).Symbol()}
case OpZeroWB:
fn = typedmemclr
- typ = &ExternSymbol{Sym: w.Aux.(Type).Symbol()}
+ typ = &ExternSymbol{Sym: w.Aux.(*types.Type).Symbol()}
}
// then block: emit write barrier call
@@ -198,12 +199,12 @@ func writebarrier(f *Func) {
// else block: normal store
switch w.Op {
case OpStoreWB:
- memElse = bElse.NewValue3A(pos, OpStore, TypeMem, w.Aux, ptr, val, memElse)
+ memElse = bElse.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, memElse)
case OpMoveWB:
- memElse = bElse.NewValue3I(pos, OpMove, TypeMem, w.AuxInt, ptr, val, memElse)
+ memElse = bElse.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, ptr, val, memElse)
memElse.Aux = w.Aux
case OpZeroWB:
- memElse = bElse.NewValue2I(pos, OpZero, TypeMem, w.AuxInt, ptr, memElse)
+ memElse = bElse.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, ptr, memElse)
memElse.Aux = w.Aux
}
@@ -223,7 +224,7 @@ func writebarrier(f *Func) {
bEnd.Values = append(bEnd.Values, last)
last.Block = bEnd
last.reset(OpPhi)
- last.Type = TypeMem
+ last.Type = types.TypeMem
last.AddArg(memThen)
last.AddArg(memElse)
for _, w := range stores {
@@ -265,10 +266,10 @@ func wbcall(pos src.XPos, b *Block, fn *obj.LSym, typ *ExternSymbol, ptr, val, m
t := val.Type.ElemType()
tmp = b.Func.fe.Auto(val.Pos, t)
aux := &AutoSymbol{Node: tmp}
- mem = b.NewValue1A(pos, OpVarDef, TypeMem, tmp, mem)
+ mem = b.NewValue1A(pos, OpVarDef, types.TypeMem, tmp, mem)
tmpaddr := b.NewValue1A(pos, OpAddr, t.PtrTo(), aux, sp)
siz := t.Size()
- mem = b.NewValue3I(pos, OpMove, TypeMem, siz, tmpaddr, val, mem)
+ mem = b.NewValue3I(pos, OpMove, types.TypeMem, siz, tmpaddr, val, mem)
mem.Aux = t
val = tmpaddr
}
@@ -280,29 +281,29 @@ func wbcall(pos src.XPos, b *Block, fn *obj.LSym, typ *ExternSymbol, ptr, val, m
taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
off = round(off, taddr.Type.Alignment())
arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
- mem = b.NewValue3A(pos, OpStore, TypeMem, ptr.Type, arg, taddr, mem)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem)
off += taddr.Type.Size()
}
off = round(off, ptr.Type.Alignment())
arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
- mem = b.NewValue3A(pos, OpStore, TypeMem, ptr.Type, arg, ptr, mem)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem)
off += ptr.Type.Size()
if val != nil {
off = round(off, val.Type.Alignment())
arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
- mem = b.NewValue3A(pos, OpStore, TypeMem, val.Type, arg, val, mem)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem)
off += val.Type.Size()
}
off = round(off, config.PtrSize)
// issue call
- mem = b.NewValue1A(pos, OpStaticCall, TypeMem, fn, mem)
+ mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, fn, mem)
mem.AuxInt = off - config.ctxt.FixedFrameSize()
if valIsVolatile {
- mem = b.NewValue1A(pos, OpVarKill, TypeMem, tmp, mem) // mark temp dead
+ mem = b.NewValue1A(pos, OpVarKill, types.TypeMem, tmp, mem) // mark temp dead
}
return mem
diff --git a/src/cmd/compile/internal/ssa/writebarrier_test.go b/src/cmd/compile/internal/ssa/writebarrier_test.go
index e26346de3c..c1f9ec7fc1 100644
--- a/src/cmd/compile/internal/ssa/writebarrier_test.go
+++ b/src/cmd/compile/internal/ssa/writebarrier_test.go
@@ -4,21 +4,24 @@
package ssa
-import "testing"
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
func TestWriteBarrierStoreOrder(t *testing.T) {
// Make sure writebarrier phase works even when StoreWB ops are not in dependency order
c := testConfig(t)
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("start", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
- Valu("sp", OpSP, TypeInvalid, 0, nil),
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("sp", OpSP, types.TypeInvalid, 0, nil),
Valu("v", OpConstNil, ptrType, 0, nil),
Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
- Valu("wb2", OpStore, TypeMem, 0, ptrType, "addr1", "v", "wb1"),
- Valu("wb1", OpStore, TypeMem, 0, ptrType, "addr1", "v", "start"), // wb1 and wb2 are out of order
+ Valu("wb2", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "wb1"),
+ Valu("wb1", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "start"), // wb1 and wb2 are out of order
Goto("exit")),
Bloc("exit",
Exit("wb2")))
@@ -33,18 +36,18 @@ func TestWriteBarrierPhi(t *testing.T) {
// a Phi op takes the store in the same block as argument.
// See issue #19067.
c := testConfig(t)
- ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
+ ptrType := c.config.Types.BytePtr
fun := c.Fun("entry",
Bloc("entry",
- Valu("start", OpInitMem, TypeMem, 0, nil),
- Valu("sb", OpSB, TypeInvalid, 0, nil),
- Valu("sp", OpSP, TypeInvalid, 0, nil),
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.TypeInvalid, 0, nil),
+ Valu("sp", OpSP, types.TypeInvalid, 0, nil),
Goto("loop")),
Bloc("loop",
- Valu("phi", OpPhi, TypeMem, 0, nil, "start", "wb"),
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "wb"),
Valu("v", OpConstNil, ptrType, 0, nil),
Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
- Valu("wb", OpStore, TypeMem, 0, ptrType, "addr", "v", "phi"), // has write barrier
+ Valu("wb", OpStore, types.TypeMem, 0, ptrType, "addr", "v", "phi"), // has write barrier
Goto("loop")))
CheckFunc(fun.f)
diff --git a/src/cmd/compile/internal/ssa/zcse.go b/src/cmd/compile/internal/ssa/zcse.go
index 16d5c10331..44688d9573 100644
--- a/src/cmd/compile/internal/ssa/zcse.go
+++ b/src/cmd/compile/internal/ssa/zcse.go
@@ -4,6 +4,8 @@
package ssa
+import "cmd/compile/internal/types"
+
// zcse does an initial pass of common-subexpression elimination on the
// function for values with zero arguments to allow the more expensive cse
// to begin with a reduced number of values. Values are just relinked,
@@ -61,7 +63,7 @@ type vkey struct {
op Op
ai int64 // aux int
ax interface{} // aux
- t Type // type
+ t *types.Type // type
}
// keyFor returns the AuxInt portion of a key structure uniquely identifying a
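vkey remains a valid comparable map key after the change: *types.Type is an ordinary pointer, and since Type instances are not guaranteed to be canonical (per the deleted comment above), distinct-but-equivalent types at worst make CSE miss a merge, never produce a wrong one. A sketch of the lookup, with a hypothetical seen map:

    // seen maps each zero-arg key to its canonical value (hypothetical name).
    var seen = map[vkey]*Value{}

    func canonical(v *Value) *Value {
        key := vkey{op: v.Op, ai: v.AuxInt, ax: v.Aux, t: v.Type}
        if prev, ok := seen[key]; ok {
            return prev // reuse the earlier identical value
        }
        seen[key] = v
        return v
    }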
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index b0be122d0a..6f2f574b39 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -5,7 +5,6 @@
package types
import (
- "cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
@@ -69,6 +68,10 @@ const (
// pseudo-types for import/export
TDDDFIELD // wrapper: contained type is a ... field
+ // SSA backend types
+ TSSA // internal types used by SSA backend (flags, memory, etc.)
+ TTUPLE // a pair of types, used by SSA backend
+
NTYPE
)
@@ -295,6 +298,12 @@ func (t *Type) ChanType() *Chan {
return t.Extra.(*Chan)
}
+type Tuple struct {
+ first *Type
+ second *Type
+ // Any tuple with a memory type must put that memory type second.
+}
+
// Array contains Type fields specific to array types.
type Array struct {
Elem *Type // element type
@@ -425,6 +434,8 @@ func New(et EType) *Type {
t.Extra = DDDField{}
case TCHAN:
t.Extra = new(Chan)
+ case TTUPLE:
+ t.Extra = new(Tuple)
}
return t
}
@@ -472,6 +483,19 @@ func NewChan(elem *Type, dir ChanDir) *Type {
return t
}
+func NewTuple(t1, t2 *Type) *Type {
+ t := New(TTUPLE)
+ t.Extra.(*Tuple).first = t1
+ t.Extra.(*Tuple).second = t2
+ return t
+}
+
+func newSSA(name string) *Type {
+ t := New(TSSA)
+ t.Extra = name
+ return t
+}
+
// NewMap returns a new map Type with key type k and element (aka value) type v.
func NewMap(k, v *Type) *Type {
t := New(TMAP)
@@ -658,6 +682,8 @@ func (t *Type) Copy() *Type {
case TARRAY:
x := *t.Extra.(*Array)
nt.Extra = &x
+ case TTUPLE, TSSA:
+ Fatalf("ssa types cannot be copied")
}
// TODO(mdempsky): Find out why this is necessary and explain.
if t.Orig == t {
@@ -857,6 +883,12 @@ func (t *Type) ArgWidth() int64 {
}
func (t *Type) Size() int64 {
+ if t.Etype == TSSA {
+ if t == TypeInt128 {
+ return 16
+ }
+ return 0
+ }
Dowidth(t)
return t.Width
}
@@ -870,41 +902,47 @@ func (t *Type) SimpleString() string {
return t.Etype.String()
}
+// Cmp is a comparison between values a and b.
+// -1 if a < b
+// 0 if a == b
+// 1 if a > b
+type Cmp int8
+
+const (
+ CMPlt = Cmp(-1)
+ CMPeq = Cmp(0)
+ CMPgt = Cmp(1)
+)
+
// Compare compares types for purposes of the SSA back
-// end, returning an ssa.Cmp (one of CMPlt, CMPeq, CMPgt).
+// end, returning a Cmp (one of CMPlt, CMPeq, CMPgt).
// The answers are correct for an optimizer
// or code generator, but not necessarily typechecking.
// The order chosen is arbitrary, only consistency and division
// into equivalence classes (Types that compare CMPeq) matters.
-func (t *Type) Compare(u ssa.Type) ssa.Cmp {
- x, ok := u.(*Type)
- // ssa.CompilerType is smaller than gc.Type
- // bare pointer equality is easy.
- if !ok {
- return ssa.CMPgt
- }
+func (t *Type) Compare(x *Type) Cmp {
if x == t {
- return ssa.CMPeq
+ return CMPeq
}
return t.cmp(x)
}
-func cmpForNe(x bool) ssa.Cmp {
+func cmpForNe(x bool) Cmp {
if x {
- return ssa.CMPlt
+ return CMPlt
}
- return ssa.CMPgt
+ return CMPgt
}
-func (r *Sym) cmpsym(s *Sym) ssa.Cmp {
+func (r *Sym) cmpsym(s *Sym) Cmp {
if r == s {
- return ssa.CMPeq
+ return CMPeq
}
if r == nil {
- return ssa.CMPlt
+ return CMPlt
}
if s == nil {
- return ssa.CMPgt
+ return CMPgt
}
// Fast sort, not pretty sort
if len(r.Name) != len(s.Name) {
@@ -921,28 +959,28 @@ func (r *Sym) cmpsym(s *Sym) ssa.Cmp {
if r.Name != s.Name {
return cmpForNe(r.Name < s.Name)
}
- return ssa.CMPeq
+ return CMPeq
}
-// cmp compares two *Types t and x, returning ssa.CMPlt,
-// ssa.CMPeq, ssa.CMPgt as t<x, t==x, t>x, for an arbitrary
+// cmp compares two *Types t and x, returning CMPlt,
+// CMPeq, CMPgt as t<x, t==x, t>x, for an arbitrary
// and optimizer-centric notion of comparison.
// TODO(josharian): make this safe for recursive interface types
// and use in signatlist sorting. See issue 19869.
-func (t *Type) cmp(x *Type) ssa.Cmp {
+func (t *Type) cmp(x *Type) Cmp {
// This follows the structure of eqtype in subr.go
// with two exceptions.
// 1. Symbols are compared more carefully because a <,=,> result is desired.
// 2. Maps are treated specially to avoid endless recursion -- maps
// contain an internal data type not expressible in Go source code.
if t == x {
- return ssa.CMPeq
+ return CMPeq
}
if t == nil {
- return ssa.CMPlt
+ return CMPlt
}
if x == nil {
- return ssa.CMPgt
+ return CMPgt
}
if t.Etype != x.Etype {
@@ -955,17 +993,17 @@ func (t *Type) cmp(x *Type) ssa.Cmp {
switch t.Etype {
case TUINT8:
if (t == Types[TUINT8] || t == Bytetype) && (x == Types[TUINT8] || x == Bytetype) {
- return ssa.CMPeq
+ return CMPeq
}
case TINT32:
if (t == Types[Runetype.Etype] || t == Runetype) && (x == Types[Runetype.Etype] || x == Runetype) {
- return ssa.CMPeq
+ return CMPeq
}
}
}
- if c := t.Sym.cmpsym(x.Sym); c != ssa.CMPeq {
+ if c := t.Sym.cmpsym(x.Sym); c != CMPeq {
return c
}
@@ -974,19 +1012,43 @@ func (t *Type) cmp(x *Type) ssa.Cmp {
if t.Vargen != x.Vargen {
return cmpForNe(t.Vargen < x.Vargen)
}
- return ssa.CMPeq
+ return CMPeq
}
// both syms nil, look at structure below.
switch t.Etype {
case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
- return ssa.CMPeq
- }
+ return CMPeq
+
+ case TSSA:
+ tname := t.Extra.(string)
+ xname := x.Extra.(string)
+ // desire fast sorting, not pretty sorting.
+ if len(tname) == len(xname) {
+ if tname == xname {
+ return CMPeq
+ }
+ if tname < xname {
+ return CMPlt
+ }
+ return CMPgt
+ }
+ if len(tname) > len(xname) {
+ return CMPgt
+ }
+ return CMPlt
+
+ case TTUPLE:
+ xtup := x.Extra.(*Tuple)
+ ttup := t.Extra.(*Tuple)
+ if c := ttup.first.Compare(xtup.first); c != CMPeq {
+ return c
+ }
+ return ttup.second.Compare(xtup.second)
- switch t.Etype {
case TMAP:
- if c := t.Key().cmp(x.Key()); c != ssa.CMPeq {
+ if c := t.Key().cmp(x.Key()); c != CMPeq {
return c
}
return t.Val().cmp(x.Val())
@@ -998,20 +1060,20 @@ func (t *Type) cmp(x *Type) ssa.Cmp {
case TSTRUCT:
if t.StructType().Map == nil {
if x.StructType().Map != nil {
- return ssa.CMPlt // nil < non-nil
+ return CMPlt // nil < non-nil
}
// to the fallthrough
} else if x.StructType().Map == nil {
- return ssa.CMPgt // nil > non-nil
+ return CMPgt // nil > non-nil
} else if t.StructType().Map.MapType().Bucket == t {
// Both have non-nil Map
// Special case for Maps which include a recursive type where the recursion is not broken with a named type
if x.StructType().Map.MapType().Bucket != x {
- return ssa.CMPlt // bucket maps are least
+ return CMPlt // bucket maps are least
}
return t.StructType().Map.cmp(x.StructType().Map)
} else if x.StructType().Map.MapType().Bucket == x {
- return ssa.CMPgt // bucket maps are least
+ return CMPgt // bucket maps are least
} // If t != t.Map.Bucket, fall through to general case
tfs := t.FieldSlice()
@@ -1024,34 +1086,34 @@ func (t *Type) cmp(x *Type) ssa.Cmp {
if t1.Note != x1.Note {
return cmpForNe(t1.Note < x1.Note)
}
- if c := t1.Sym.cmpsym(x1.Sym); c != ssa.CMPeq {
+ if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
return c
}
- if c := t1.Type.cmp(x1.Type); c != ssa.CMPeq {
+ if c := t1.Type.cmp(x1.Type); c != CMPeq {
return c
}
}
if len(tfs) != len(xfs) {
return cmpForNe(len(tfs) < len(xfs))
}
- return ssa.CMPeq
+ return CMPeq
case TINTER:
tfs := t.FieldSlice()
xfs := x.FieldSlice()
for i := 0; i < len(tfs) && i < len(xfs); i++ {
t1, x1 := tfs[i], xfs[i]
- if c := t1.Sym.cmpsym(x1.Sym); c != ssa.CMPeq {
+ if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
return c
}
- if c := t1.Type.cmp(x1.Type); c != ssa.CMPeq {
+ if c := t1.Type.cmp(x1.Type); c != CMPeq {
return c
}
}
if len(tfs) != len(xfs) {
return cmpForNe(len(tfs) < len(xfs))
}
- return ssa.CMPeq
+ return CMPeq
case TFUNC:
for _, f := range RecvsParamsResults {
@@ -1064,7 +1126,7 @@ func (t *Type) cmp(x *Type) ssa.Cmp {
if ta.Isddd() != tb.Isddd() {
return cmpForNe(!ta.Isddd())
}
- if c := ta.Type.cmp(tb.Type); c != ssa.CMPeq {
+ if c := ta.Type.cmp(tb.Type); c != CMPeq {
return c
}
}
@@ -1072,7 +1134,7 @@ func (t *Type) cmp(x *Type) ssa.Cmp {
return cmpForNe(len(tfs) < len(xfs))
}
}
- return ssa.CMPeq
+ return CMPeq
case TARRAY:
if t.NumElem() != x.NumElem() {
@@ -1202,19 +1264,29 @@ func (t *Type) IsEmptyInterface() bool {
return t.IsInterface() && t.NumFields() == 0
}
-func (t *Type) ElemType() ssa.Type {
+func (t *Type) ElemType() *Type {
// TODO(josharian): If Type ever moves to a shared
// internal package, remove this silly wrapper.
return t.Elem()
}
-func (t *Type) PtrTo() ssa.Type {
+func (t *Type) PtrTo() *Type {
return NewPtr(t)
}
func (t *Type) NumFields() int {
return t.Fields().Len()
}
-func (t *Type) FieldType(i int) ssa.Type {
+func (t *Type) FieldType(i int) *Type {
+ if t.Etype == TTUPLE {
+ switch i {
+ case 0:
+ return t.Extra.(*Tuple).first
+ case 1:
+ return t.Extra.(*Tuple).second
+ default:
+ panic("bad tuple index")
+ }
+ }
return t.Field(i).Type
}
func (t *Type) FieldOff(i int) int64 {
@@ -1252,10 +1324,10 @@ func (t *Type) ChanDir() ChanDir {
return t.Extra.(*Chan).Dir
}
-func (t *Type) IsMemory() bool { return false }
-func (t *Type) IsFlags() bool { return false }
-func (t *Type) IsVoid() bool { return false }
-func (t *Type) IsTuple() bool { return false }
+func (t *Type) IsMemory() bool { return t == TypeMem }
+func (t *Type) IsFlags() bool { return t == TypeFlags }
+func (t *Type) IsVoid() bool { return t == TypeVoid }
+func (t *Type) IsTuple() bool { return t.Etype == TTUPLE }
// IsUntyped reports whether t is an untyped type.
func (t *Type) IsUntyped() bool {
@@ -1332,3 +1404,11 @@ func FakeRecvType() *Type {
}
return recvType
}
+
+var (
+ TypeInvalid *Type = newSSA("invalid")
+ TypeMem *Type = newSSA("mem")
+ TypeFlags *Type = newSSA("flags")
+ TypeVoid *Type = newSSA("void")
+ TypeInt128 *Type = newSSA("int128")
+)
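Taken together with the Size and predicate hunks above, these singletons give the SSA pseudo-types concrete behavior. A quick sketch of what holds after this change (illustrative, not a test in the CL):

    package types

    func ssaTypeSketch() {
        m := TypeMem
        _ = m.IsMemory()               // true: implemented as t == TypeMem
        _ = m.Size()                   // 0: TSSA types have no width...
        _ = TypeInt128.Size()          // ...except TypeInt128, which reports 16
        _ = TypeMem.Compare(TypeFlags) // TSSA types are ordered by their name strings
    }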
diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go
index 5fb4021065..796cd449de 100644
--- a/src/cmd/compile/internal/types/utils.go
+++ b/src/cmd/compile/internal/types/utils.go
@@ -114,6 +114,8 @@ var etnames = []string{
TFUNCARGS: "TFUNCARGS",
TCHANARGS: "TCHANARGS",
TDDDFIELD: "TDDDFIELD",
+ TSSA: "TSSA",
+ TTUPLE: "TTUPLE",
}
func (et EType) String() string {
diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go
index 92b385c3cc..5bf14109b2 100644
--- a/src/cmd/compile/internal/x86/387.go
+++ b/src/cmd/compile/internal/x86/387.go
@@ -7,6 +7,7 @@ package x86
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"math"
@@ -323,7 +324,7 @@ func popAndSave(s *gc.SSAGenState, v *ssa.Value) {
}
// loadPush returns the opcode for load+push of the given type.
-func loadPush(t ssa.Type) obj.As {
+func loadPush(t *types.Type) obj.As {
if t.Size() == 4 {
return x86.AFMOVF
}
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index 3822272273..54a76bda2f 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -10,6 +10,7 @@ import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
@@ -38,7 +39,7 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
+func loadByType(t *types.Type) obj.As {
// Avoid partial register write
if !t.IsFloat() && t.Size() <= 2 {
if t.Size() == 1 {
@@ -52,7 +53,7 @@ func loadByType(t ssa.Type) obj.As {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
+func storeByType(t *types.Type) obj.As {
width := t.Size()
if t.IsFloat() {
switch width {
@@ -75,7 +76,7 @@ func storeByType(t ssa.Type) obj.As {
}
// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) obj.As {
+func moveByType(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4: