path: root/src/cmd/compile/internal/ssa/rewriteMIPS.go
author     Josh Bleecher Snyder <josharian@gmail.com>  2017-04-28 14:12:28 -0700
committer  Josh Bleecher Snyder <josharian@gmail.com>  2017-05-09 23:01:51 +0000
commit     46b88c9fbccad489aed53a77afa680792fff6eaa (patch)
tree       a1fcd0d8b8134f4be9eaf2a3bb16e8cc813d8724 /src/cmd/compile/internal/ssa/rewriteMIPS.go
parent     6a24b2d0c1f5a8e2fa5ddb56db5a207671e8beb8 (diff)
cmd/compile: change ssa.Type into *types.Type
When package ssa was created, Type was in package gc.
To avoid circular dependencies, we used an interface (ssa.Type)
to represent type information in SSA.

In the Go 1.9 cycle, gri extricated the Type type from package gc.
As a result, we can now use it in package ssa.
Now, instead of package types depending on package ssa,
it is the other way.
This is a more sensible dependency tree,
and helps compiler performance a bit.

Though this is a big CL, most of the changes are mechanical
and uninteresting.

Interesting bits:

* Add new singleton globals to package types for the special
  SSA types Memory, Void, Invalid, Flags, and Int128.

* Add two new Types, TSSA for the special types, and TTUPLE,
  for SSA tuple types. ssa.MakeTuple is now types.NewTuple.

* Move type comparison result constants CMPlt, CMPeq, and CMPgt
  to package types.

* We had picked the name "types" in our rules for the handy
  list of types provided by ssa.Config. That conflicted with
  the types package name, so change it to "typ".

* Update the type comparison routine to handle tuples and
  special types inline.

* Teach gc/fmt.go how to print special types.

* We can now eliminate ElemTypes in favor of just Elem,
  and probably also some other duplicated Type methods
  designed to return ssa.Type instead of *types.Type.

* The ssa tests were using their own dummy types, and they
  were not particularly careful about types in general.
  Of necessity, this CL switches them to use *types.Type;
  it does not make them more type-accurate. Unfortunately,
  using types.Type means initializing a bit of the types
  universe. This is prime for refactoring and improvement.

This shrinks ssa.Value; it now fits in a smaller size class
on 64 bit systems. This doesn't have a giant impact, though,
since most Values are preallocated in a chunk.

name      old alloc/op    new alloc/op    delta
Template     37.9MB ± 0%     37.7MB ± 0%   -0.57%  (p=0.000 n=10+8)
Unicode      28.9MB ± 0%     28.7MB ± 0%   -0.52%  (p=0.000 n=10+10)
GoTypes       110MB ± 0%      109MB ± 0%   -0.88%  (p=0.000 n=10+10)
Flate        24.7MB ± 0%     24.6MB ± 0%   -0.66%  (p=0.000 n=10+10)
GoParser     31.1MB ± 0%     30.9MB ± 0%   -0.61%  (p=0.000 n=10+9)
Reflect      73.9MB ± 0%     73.4MB ± 0%   -0.62%  (p=0.000 n=10+8)
Tar          25.8MB ± 0%     25.6MB ± 0%   -0.77%  (p=0.000 n=9+10)
XML          41.2MB ± 0%     40.9MB ± 0%   -0.80%  (p=0.000 n=10+10)
[Geo mean]   40.5MB          40.3MB        -0.68%

name      old allocs/op   new allocs/op   delta
Template      385k ± 0%       386k ± 0%     ~     (p=0.356 n=10+9)
Unicode       343k ± 1%       344k ± 0%     ~     (p=0.481 n=10+10)
GoTypes      1.16M ± 0%      1.16M ± 0%   -0.16%  (p=0.004 n=10+10)
Flate         238k ± 1%       238k ± 1%     ~     (p=0.853 n=10+10)
GoParser      320k ± 0%       320k ± 0%     ~     (p=0.720 n=10+9)
Reflect       957k ± 0%       957k ± 0%     ~     (p=0.460 n=10+8)
Tar           252k ± 0%       252k ± 0%     ~     (p=0.133 n=9+10)
XML           400k ± 0%       400k ± 0%     ~     (p=0.796 n=10+10)
[Geo mean]    428k            428k         -0.01%

Removing all the interface calls helps non-trivially with CPU, though.

name      old time/op     new time/op     delta
Template     178ms ± 4%      173ms ± 3%   -2.90%  (p=0.000 n=94+96)
Unicode     85.0ms ± 4%     83.9ms ± 4%   -1.23%  (p=0.000 n=96+96)
GoTypes      543ms ± 3%      528ms ± 3%   -2.73%  (p=0.000 n=98+96)
Flate        116ms ± 3%      113ms ± 4%   -2.34%  (p=0.000 n=96+99)
GoParser     144ms ± 3%      140ms ± 4%   -2.80%  (p=0.000 n=99+97)
Reflect      344ms ± 3%      334ms ± 4%   -3.02%  (p=0.000 n=100+99)
Tar          106ms ± 5%      103ms ± 4%   -3.30%  (p=0.000 n=98+94)
XML          198ms ± 5%      192ms ± 4%   -2.88%  (p=0.000 n=92+95)
[Geo mean]   178ms           173ms        -2.65%

name      old user-time/op  new user-time/op  delta
Template     229ms ± 5%      224ms ± 5%   -2.36%  (p=0.000 n=95+99)
Unicode      107ms ± 6%      106ms ± 5%   -1.13%  (p=0.001 n=93+95)
GoTypes      696ms ± 4%      679ms ± 4%   -2.45%  (p=0.000 n=97+99)
Flate        137ms ± 4%      134ms ± 5%   -2.66%  (p=0.000 n=99+96)
GoParser     176ms ± 5%      172ms ± 8%   -2.27%  (p=0.000 n=98+100)
Reflect      430ms ± 6%      411ms ± 5%   -4.46%  (p=0.000 n=100+92)
Tar          128ms ±13%      123ms ±13%   -4.21%  (p=0.000 n=100+100)
XML          239ms ± 6%      233ms ± 6%   -2.50%  (p=0.000 n=95+97)
[Geo mean]   220ms           213ms        -2.76%

Change-Id: I15c7d6268347f8358e75066dfdbd77db24e8d0c1
Reviewed-on: https://go-review.googlesource.com/42145
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
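
For orientation, here is a minimal sketch of the renaming this CL applies
throughout the generated rewrite files (the function name exampleRewrite and
the particular values are hypothetical illustrations, not code from this CL):
the rule-supplied type list is now called "typ", tuple types come from
types.NewTuple, and the special SSA types are singletons in package types.

// exampleRewrite is a hypothetical rewrite function showing the new naming.
func exampleRewrite(v *Value) bool {
	b := v.Block
	// Previously spelled "types", which collided with the package name.
	typ := &b.Func.Config.Types

	// Tuple-typed ops: ssa.MakeTuple(...) becomes types.NewTuple(...).
	div := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))

	// Special SSA types are now package-level singletons:
	// types.TypeMem, types.TypeFlags, and so on.
	st := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
	_ = div
	_ = st
	return false
}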
Diffstat (limited to 'src/cmd/compile/internal/ssa/rewriteMIPS.go')
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS.go | 1222
1 file changed, 612 insertions(+), 610 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index bed923b7e9..6c36976e26 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -6,10 +6,12 @@ package ssa
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
var _ = math.MinInt8 // in case not otherwise used
var _ = obj.ANOP // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
+var _ = types.TypeMem // in case not otherwise used
func rewriteValueMIPS(v *Value) bool {
switch v.Op {
@@ -689,11 +691,11 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicAnd8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))))) mem)
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -702,34 +704,34 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicAnd)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSOR, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(val)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v6.AuxInt = 3
v6.AddArg(ptr)
v5.AddArg(v6)
v3.AddArg(v5)
v2.AddArg(v3)
- v7 := b.NewValue0(v.Pos, OpMIPSNORconst, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
v7.AuxInt = 0
- v8 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v9.AuxInt = 0xff
v8.AddArg(v9)
- v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v10.AuxInt = 3
- v11 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v11 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v11.AuxInt = 3
v11.AddArg(ptr)
v10.AddArg(v11)
@@ -742,7 +744,7 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool {
}
// match: (AtomicAnd8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicAnd (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) (NORconst [0] <types.UInt32> (SLL <types.UInt32> (MOVWconst [0xff]) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))))) mem)
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -751,39 +753,39 @@ func rewriteValueMIPS_OpAtomicAnd8_0(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicAnd)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSOR, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(val)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v6.AuxInt = 3
- v7 := b.NewValue0(v.Pos, OpMIPSXORconst, types.UInt32)
+ v7 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
v7.AuxInt = 3
v7.AddArg(ptr)
v6.AddArg(v7)
v5.AddArg(v6)
v3.AddArg(v5)
v2.AddArg(v3)
- v8 := b.NewValue0(v.Pos, OpMIPSNORconst, types.UInt32)
+ v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
v8.AuxInt = 0
- v9 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v10.AuxInt = 0xff
v9.AddArg(v10)
- v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v11.AuxInt = 3
- v12 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v12 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v12.AuxInt = 3
- v13 := b.NewValue0(v.Pos, OpMIPSXORconst, types.UInt32)
+ v13 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
v13.AuxInt = 3
v13.AddArg(ptr)
v12.AddArg(v13)
@@ -860,11 +862,11 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (AtomicOr8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] ptr))) mem)
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -873,19 +875,19 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicOr)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v3.AddArg(val)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v4.AuxInt = 3
- v5 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v5.AuxInt = 3
v5.AddArg(ptr)
v4.AddArg(v5)
@@ -896,7 +898,7 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool {
}
// match: (AtomicOr8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicOr (AND <types.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <types.UInt32> (ZeroExt8to32 val) (SLLconst <types.UInt32> [3] (ANDconst <types.UInt32> [3] (XORconst <types.UInt32> [3] ptr)))) mem)
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
@@ -905,21 +907,21 @@ func rewriteValueMIPS_OpAtomicOr8_0(v *Value) bool {
break
}
v.reset(OpMIPSLoweredAtomicOr)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, types.UInt32Ptr)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSSLL, types.UInt32)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v3.AddArg(val)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v4.AuxInt = 3
- v5 := b.NewValue0(v.Pos, OpMIPSANDconst, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSXORconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
v6.AuxInt = 3
v6.AddArg(ptr)
v5.AddArg(v6)
@@ -986,8 +988,8 @@ func rewriteValueMIPS_OpAvg32u_0(v *Value) bool {
func rewriteValueMIPS_OpBitLen32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (BitLen32 <t> x)
// cond:
// result: (SUB (MOVWconst [32]) (CLZ <t> x))
@@ -995,7 +997,7 @@ func rewriteValueMIPS_OpBitLen32_0(v *Value) bool {
t := v.Type
x := v.Args[0]
v.reset(OpMIPSSUB)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 32
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
@@ -1149,8 +1151,8 @@ func rewriteValueMIPS_OpConvert_0(v *Value) bool {
func rewriteValueMIPS_OpCtz32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Ctz32 <t> x)
// cond:
// result: (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
@@ -1158,7 +1160,7 @@ func rewriteValueMIPS_OpCtz32_0(v *Value) bool {
t := v.Type
x := v.Args[0]
v.reset(OpMIPSSUB)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 32
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
@@ -1244,8 +1246,8 @@ func rewriteValueMIPS_OpCvt64Fto32F_0(v *Value) bool {
func rewriteValueMIPS_OpDiv16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16 x y)
// cond:
// result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
@@ -1253,11 +1255,11 @@ func rewriteValueMIPS_OpDiv16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1267,8 +1269,8 @@ func rewriteValueMIPS_OpDiv16_0(v *Value) bool {
func rewriteValueMIPS_OpDiv16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div16u x y)
// cond:
// result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -1276,11 +1278,11 @@ func rewriteValueMIPS_OpDiv16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1290,8 +1292,8 @@ func rewriteValueMIPS_OpDiv16u_0(v *Value) bool {
func rewriteValueMIPS_OpDiv32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32 x y)
// cond:
// result: (Select1 (DIV x y))
@@ -1299,7 +1301,7 @@ func rewriteValueMIPS_OpDiv32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1322,8 +1324,8 @@ func rewriteValueMIPS_OpDiv32F_0(v *Value) bool {
func rewriteValueMIPS_OpDiv32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div32u x y)
// cond:
// result: (Select1 (DIVU x y))
@@ -1331,7 +1333,7 @@ func rewriteValueMIPS_OpDiv32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1354,8 +1356,8 @@ func rewriteValueMIPS_OpDiv64F_0(v *Value) bool {
func rewriteValueMIPS_OpDiv8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8 x y)
// cond:
// result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
@@ -1363,11 +1365,11 @@ func rewriteValueMIPS_OpDiv8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1377,8 +1379,8 @@ func rewriteValueMIPS_OpDiv8_0(v *Value) bool {
func rewriteValueMIPS_OpDiv8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Div8u x y)
// cond:
// result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -1386,11 +1388,11 @@ func rewriteValueMIPS_OpDiv8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1400,8 +1402,8 @@ func rewriteValueMIPS_OpDiv8u_0(v *Value) bool {
func rewriteValueMIPS_OpEq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq16 x y)
// cond:
// result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -1410,11 +1412,11 @@ func rewriteValueMIPS_OpEq16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1424,8 +1426,8 @@ func rewriteValueMIPS_OpEq16_0(v *Value) bool {
func rewriteValueMIPS_OpEq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq32 x y)
// cond:
// result: (SGTUconst [1] (XOR x y))
@@ -1434,7 +1436,7 @@ func rewriteValueMIPS_OpEq32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1451,7 +1453,7 @@ func rewriteValueMIPS_OpEq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1468,7 +1470,7 @@ func rewriteValueMIPS_OpEq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1478,8 +1480,8 @@ func rewriteValueMIPS_OpEq64F_0(v *Value) bool {
func rewriteValueMIPS_OpEq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Eq8 x y)
// cond:
// result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -1488,11 +1490,11 @@ func rewriteValueMIPS_OpEq8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1502,17 +1504,17 @@ func rewriteValueMIPS_OpEq8_0(v *Value) bool {
func rewriteValueMIPS_OpEqB_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqB x y)
// cond:
- // result: (XORconst [1] (XOR <types.Bool> x y))
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1522,8 +1524,8 @@ func rewriteValueMIPS_OpEqB_0(v *Value) bool {
func rewriteValueMIPS_OpEqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (EqPtr x y)
// cond:
// result: (SGTUconst [1] (XOR x y))
@@ -1532,7 +1534,7 @@ func rewriteValueMIPS_OpEqPtr_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1542,8 +1544,8 @@ func rewriteValueMIPS_OpEqPtr_0(v *Value) bool {
func rewriteValueMIPS_OpGeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt16to32 y) (SignExt16to32 x)))
@@ -1552,11 +1554,11 @@ func rewriteValueMIPS_OpGeq16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1566,8 +1568,8 @@ func rewriteValueMIPS_OpGeq16_0(v *Value) bool {
func rewriteValueMIPS_OpGeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq16U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)))
@@ -1576,11 +1578,11 @@ func rewriteValueMIPS_OpGeq16U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1590,8 +1592,8 @@ func rewriteValueMIPS_OpGeq16U_0(v *Value) bool {
func rewriteValueMIPS_OpGeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32 x y)
// cond:
// result: (XORconst [1] (SGT y x))
@@ -1600,7 +1602,7 @@ func rewriteValueMIPS_OpGeq32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -1617,7 +1619,7 @@ func rewriteValueMIPS_OpGeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1627,8 +1629,8 @@ func rewriteValueMIPS_OpGeq32F_0(v *Value) bool {
func rewriteValueMIPS_OpGeq32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq32U x y)
// cond:
// result: (XORconst [1] (SGTU y x))
@@ -1637,7 +1639,7 @@ func rewriteValueMIPS_OpGeq32U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -1654,7 +1656,7 @@ func rewriteValueMIPS_OpGeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1664,8 +1666,8 @@ func rewriteValueMIPS_OpGeq64F_0(v *Value) bool {
func rewriteValueMIPS_OpGeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt8to32 y) (SignExt8to32 x)))
@@ -1674,11 +1676,11 @@ func rewriteValueMIPS_OpGeq8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1688,8 +1690,8 @@ func rewriteValueMIPS_OpGeq8_0(v *Value) bool {
func rewriteValueMIPS_OpGeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Geq8U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)))
@@ -1698,11 +1700,11 @@ func rewriteValueMIPS_OpGeq8U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1721,8 +1723,8 @@ func rewriteValueMIPS_OpGetClosurePtr_0(v *Value) bool {
func rewriteValueMIPS_OpGreater16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16 x y)
// cond:
// result: (SGT (SignExt16to32 x) (SignExt16to32 y))
@@ -1730,10 +1732,10 @@ func rewriteValueMIPS_OpGreater16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1742,8 +1744,8 @@ func rewriteValueMIPS_OpGreater16_0(v *Value) bool {
func rewriteValueMIPS_OpGreater16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater16U x y)
// cond:
// result: (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))
@@ -1751,10 +1753,10 @@ func rewriteValueMIPS_OpGreater16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1783,7 +1785,7 @@ func rewriteValueMIPS_OpGreater32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1813,7 +1815,7 @@ func rewriteValueMIPS_OpGreater64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1823,8 +1825,8 @@ func rewriteValueMIPS_OpGreater64F_0(v *Value) bool {
func rewriteValueMIPS_OpGreater8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8 x y)
// cond:
// result: (SGT (SignExt8to32 x) (SignExt8to32 y))
@@ -1832,10 +1834,10 @@ func rewriteValueMIPS_OpGreater8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1844,8 +1846,8 @@ func rewriteValueMIPS_OpGreater8_0(v *Value) bool {
func rewriteValueMIPS_OpGreater8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Greater8U x y)
// cond:
// result: (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))
@@ -1853,10 +1855,10 @@ func rewriteValueMIPS_OpGreater8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v.AddArg(v1)
return true
@@ -1865,8 +1867,8 @@ func rewriteValueMIPS_OpGreater8U_0(v *Value) bool {
func rewriteValueMIPS_OpHmul32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32 x y)
// cond:
// result: (Select0 (MULT x y))
@@ -1874,7 +1876,7 @@ func rewriteValueMIPS_OpHmul32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSMULT, MakeTuple(types.Int32, types.Int32))
+ v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1884,8 +1886,8 @@ func rewriteValueMIPS_OpHmul32_0(v *Value) bool {
func rewriteValueMIPS_OpHmul32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Hmul32u x y)
// cond:
// result: (Select0 (MULTU x y))
@@ -1893,7 +1895,7 @@ func rewriteValueMIPS_OpHmul32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(types.UInt32, types.UInt32))
+ v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -1931,8 +1933,8 @@ func rewriteValueMIPS_OpIsInBounds_0(v *Value) bool {
func rewriteValueMIPS_OpIsNonNil_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsNonNil ptr)
// cond:
// result: (SGTU ptr (MOVWconst [0]))
@@ -1940,7 +1942,7 @@ func rewriteValueMIPS_OpIsNonNil_0(v *Value) bool {
ptr := v.Args[0]
v.reset(OpMIPSSGTU)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
return true
@@ -1949,8 +1951,8 @@ func rewriteValueMIPS_OpIsNonNil_0(v *Value) bool {
func rewriteValueMIPS_OpIsSliceInBounds_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (IsSliceInBounds idx len)
// cond:
// result: (XORconst [1] (SGTU idx len))
@@ -1959,7 +1961,7 @@ func rewriteValueMIPS_OpIsSliceInBounds_0(v *Value) bool {
len := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
@@ -1969,8 +1971,8 @@ func rewriteValueMIPS_OpIsSliceInBounds_0(v *Value) bool {
func rewriteValueMIPS_OpLeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
@@ -1979,11 +1981,11 @@ func rewriteValueMIPS_OpLeq16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -1993,8 +1995,8 @@ func rewriteValueMIPS_OpLeq16_0(v *Value) bool {
func rewriteValueMIPS_OpLeq16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq16U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -2003,11 +2005,11 @@ func rewriteValueMIPS_OpLeq16U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2017,8 +2019,8 @@ func rewriteValueMIPS_OpLeq16U_0(v *Value) bool {
func rewriteValueMIPS_OpLeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32 x y)
// cond:
// result: (XORconst [1] (SGT x y))
@@ -2027,7 +2029,7 @@ func rewriteValueMIPS_OpLeq32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2044,7 +2046,7 @@ func rewriteValueMIPS_OpLeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2054,8 +2056,8 @@ func rewriteValueMIPS_OpLeq32F_0(v *Value) bool {
func rewriteValueMIPS_OpLeq32U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq32U x y)
// cond:
// result: (XORconst [1] (SGTU x y))
@@ -2064,7 +2066,7 @@ func rewriteValueMIPS_OpLeq32U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -2081,7 +2083,7 @@ func rewriteValueMIPS_OpLeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2091,8 +2093,8 @@ func rewriteValueMIPS_OpLeq64F_0(v *Value) bool {
func rewriteValueMIPS_OpLeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
@@ -2101,11 +2103,11 @@ func rewriteValueMIPS_OpLeq8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, types.Bool)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2115,8 +2117,8 @@ func rewriteValueMIPS_OpLeq8_0(v *Value) bool {
func rewriteValueMIPS_OpLeq8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Leq8U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -2125,11 +2127,11 @@ func rewriteValueMIPS_OpLeq8U_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -2139,8 +2141,8 @@ func rewriteValueMIPS_OpLeq8U_0(v *Value) bool {
func rewriteValueMIPS_OpLess16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16 x y)
// cond:
// result: (SGT (SignExt16to32 y) (SignExt16to32 x))
@@ -2148,10 +2150,10 @@ func rewriteValueMIPS_OpLess16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2160,8 +2162,8 @@ func rewriteValueMIPS_OpLess16_0(v *Value) bool {
func rewriteValueMIPS_OpLess16U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less16U x y)
// cond:
// result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
@@ -2169,10 +2171,10 @@ func rewriteValueMIPS_OpLess16U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2201,7 +2203,7 @@ func rewriteValueMIPS_OpLess32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2231,7 +2233,7 @@ func rewriteValueMIPS_OpLess64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagTrue)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags)
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
@@ -2241,8 +2243,8 @@ func rewriteValueMIPS_OpLess64F_0(v *Value) bool {
func rewriteValueMIPS_OpLess8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8 x y)
// cond:
// result: (SGT (SignExt8to32 y) (SignExt8to32 x))
@@ -2250,10 +2252,10 @@ func rewriteValueMIPS_OpLess8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2262,8 +2264,8 @@ func rewriteValueMIPS_OpLess8_0(v *Value) bool {
func rewriteValueMIPS_OpLess8U_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Less8U x y)
// cond:
// result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
@@ -2271,10 +2273,10 @@ func rewriteValueMIPS_OpLess8U_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v.AddArg(v1)
return true
@@ -2406,8 +2408,8 @@ func rewriteValueMIPS_OpLoad_0(v *Value) bool {
func rewriteValueMIPS_OpLsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -2418,16 +2420,16 @@ func rewriteValueMIPS_OpLsh16x16_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2437,8 +2439,8 @@ func rewriteValueMIPS_OpLsh16x16_0(v *Value) bool {
func rewriteValueMIPS_OpLsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -2451,10 +2453,10 @@ func rewriteValueMIPS_OpLsh16x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
@@ -2501,8 +2503,8 @@ func rewriteValueMIPS_OpLsh16x64_0(v *Value) bool {
func rewriteValueMIPS_OpLsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh16x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -2513,16 +2515,16 @@ func rewriteValueMIPS_OpLsh16x8_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2532,8 +2534,8 @@ func rewriteValueMIPS_OpLsh16x8_0(v *Value) bool {
func rewriteValueMIPS_OpLsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -2544,16 +2546,16 @@ func rewriteValueMIPS_OpLsh32x16_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2563,8 +2565,8 @@ func rewriteValueMIPS_OpLsh32x16_0(v *Value) bool {
func rewriteValueMIPS_OpLsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -2577,10 +2579,10 @@ func rewriteValueMIPS_OpLsh32x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
@@ -2627,8 +2629,8 @@ func rewriteValueMIPS_OpLsh32x64_0(v *Value) bool {
func rewriteValueMIPS_OpLsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh32x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -2639,16 +2641,16 @@ func rewriteValueMIPS_OpLsh32x8_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2658,8 +2660,8 @@ func rewriteValueMIPS_OpLsh32x8_0(v *Value) bool {
func rewriteValueMIPS_OpLsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -2670,16 +2672,16 @@ func rewriteValueMIPS_OpLsh8x16_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -2689,8 +2691,8 @@ func rewriteValueMIPS_OpLsh8x16_0(v *Value) bool {
func rewriteValueMIPS_OpLsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -2703,10 +2705,10 @@ func rewriteValueMIPS_OpLsh8x32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
@@ -2753,8 +2755,8 @@ func rewriteValueMIPS_OpLsh8x64_0(v *Value) bool {
func rewriteValueMIPS_OpLsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Lsh8x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -2765,16 +2767,16 @@ func rewriteValueMIPS_OpLsh8x8_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -6044,8 +6046,8 @@ func rewriteValueMIPS_OpMIPSXORconst_0(v *Value) bool {
func rewriteValueMIPS_OpMod16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16 x y)
// cond:
// result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
@@ -6053,11 +6055,11 @@ func rewriteValueMIPS_OpMod16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6067,8 +6069,8 @@ func rewriteValueMIPS_OpMod16_0(v *Value) bool {
func rewriteValueMIPS_OpMod16u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod16u x y)
// cond:
// result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
@@ -6076,11 +6078,11 @@ func rewriteValueMIPS_OpMod16u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6090,8 +6092,8 @@ func rewriteValueMIPS_OpMod16u_0(v *Value) bool {
func rewriteValueMIPS_OpMod32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32 x y)
// cond:
// result: (Select0 (DIV x y))
@@ -6099,7 +6101,7 @@ func rewriteValueMIPS_OpMod32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6109,8 +6111,8 @@ func rewriteValueMIPS_OpMod32_0(v *Value) bool {
func rewriteValueMIPS_OpMod32u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod32u x y)
// cond:
// result: (Select0 (DIVU x y))
@@ -6118,7 +6120,7 @@ func rewriteValueMIPS_OpMod32u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6128,8 +6130,8 @@ func rewriteValueMIPS_OpMod32u_0(v *Value) bool {
func rewriteValueMIPS_OpMod8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8 x y)
// cond:
// result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
@@ -6137,11 +6139,11 @@ func rewriteValueMIPS_OpMod8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(types.Int32, types.Int32))
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, types.Int32)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6151,8 +6153,8 @@ func rewriteValueMIPS_OpMod8_0(v *Value) bool {
func rewriteValueMIPS_OpMod8u_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Mod8u x y)
// cond:
// result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
@@ -6160,11 +6162,11 @@ func rewriteValueMIPS_OpMod8u_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(types.UInt32, types.UInt32))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
@@ -6174,8 +6176,8 @@ func rewriteValueMIPS_OpMod8u_0(v *Value) bool {
func rewriteValueMIPS_OpMove_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [0] _ _ mem)
// cond:
// result: mem
@@ -6201,7 +6203,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
mem := v.Args[2]
v.reset(OpMIPSMOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6209,7 +6211,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [2] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore dst (MOVHUload src mem) mem)
for {
if v.AuxInt != 2 {
@@ -6219,12 +6221,12 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6244,14 +6246,14 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 1
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6260,7 +6262,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
if v.AuxInt != 4 {
@@ -6270,12 +6272,12 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
@@ -6283,7 +6285,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
for {
if v.AuxInt != 4 {
@@ -6293,20 +6295,20 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, types.UInt16)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, types.UInt16)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6327,30 +6329,30 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -6373,22 +6375,22 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, types.UInt8)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6398,7 +6400,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
if v.AuxInt != 8 {
@@ -6408,20 +6410,20 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
@@ -6430,7 +6432,7 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
for {
if v.AuxInt != 8 {
@@ -6440,36 +6442,36 @@ func rewriteValueMIPS_OpMove_0(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v3.AuxInt = 2
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v4.AuxInt = 2
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -6486,10 +6488,10 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Move [6] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
for {
if v.AuxInt != 6 {
@@ -6499,28 +6501,28 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, types.Int16)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6530,7 +6532,7 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
return true
}
// match: (Move [12] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
for {
if v.AuxInt != 12 {
@@ -6540,28 +6542,28 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
@@ -6571,7 +6573,7 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
return true
}
// match: (Move [16] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))))
for {
if v.AuxInt != 16 {
@@ -6581,36 +6583,36 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 12
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v0.AuxInt = 12
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v3.AuxInt = 4
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v4.AuxInt = 4
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
@@ -6621,23 +6623,23 @@ func rewriteValueMIPS_OpMove_10(v *Value) bool {
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: (s > 16 || t.(Type).Alignment()%4 != 0)
- // result: (LoweredMove [t.(Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 16 || t.(*types.Type).Alignment()%4 != 0)
+ // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(s > 16 || t.(Type).Alignment()%4 != 0) {
+ if !(s > 16 || t.(*types.Type).Alignment()%4 != 0) {
break
}
v.reset(OpMIPSLoweredMove)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(*types.Type).Alignment()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
@@ -6781,8 +6783,8 @@ func rewriteValueMIPS_OpNeg8_0(v *Value) bool {
func rewriteValueMIPS_OpNeq16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq16 x y)
// cond:
// result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
@@ -6790,15 +6792,15 @@ func rewriteValueMIPS_OpNeq16_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
return true
@@ -6807,8 +6809,8 @@ func rewriteValueMIPS_OpNeq16_0(v *Value) bool {
func rewriteValueMIPS_OpNeq32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq32 x y)
// cond:
// result: (SGTU (XOR x y) (MOVWconst [0]))
@@ -6816,11 +6818,11 @@ func rewriteValueMIPS_OpNeq32_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
return true
@@ -6836,7 +6838,7 @@ func rewriteValueMIPS_OpNeq32F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagFalse)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6853,7 +6855,7 @@ func rewriteValueMIPS_OpNeq64F_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSFPFlagFalse)
- v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
@@ -6863,8 +6865,8 @@ func rewriteValueMIPS_OpNeq64F_0(v *Value) bool {
func rewriteValueMIPS_OpNeq8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Neq8 x y)
// cond:
// result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
@@ -6872,15 +6874,15 @@ func rewriteValueMIPS_OpNeq8_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
return true
@@ -6902,8 +6904,8 @@ func rewriteValueMIPS_OpNeqB_0(v *Value) bool {
func rewriteValueMIPS_OpNeqPtr_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (NeqPtr x y)
// cond:
// result: (SGTU (XOR x y) (MOVWconst [0]))
@@ -6911,11 +6913,11 @@ func rewriteValueMIPS_OpNeqPtr_0(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
return true
@@ -7052,8 +7054,8 @@ func rewriteValueMIPS_OpRound64F_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -7063,19 +7065,19 @@ func rewriteValueMIPS_OpRsh16Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7085,8 +7087,8 @@ func rewriteValueMIPS_OpRsh16Ux16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -7096,15 +7098,15 @@ func rewriteValueMIPS_OpRsh16Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
v3.AddArg(y)
v.AddArg(v3)
@@ -7114,11 +7116,11 @@ func rewriteValueMIPS_OpRsh16Ux32_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7131,7 +7133,7 @@ func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool {
}
v.reset(OpMIPSSRLconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
@@ -7158,8 +7160,8 @@ func rewriteValueMIPS_OpRsh16Ux64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -7169,19 +7171,19 @@ func rewriteValueMIPS_OpRsh16Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7191,28 +7193,28 @@ func rewriteValueMIPS_OpRsh16Ux8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x16 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -7223,24 +7225,24 @@ func rewriteValueMIPS_OpRsh16x16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x32 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = -1
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
v3.AddArg(y)
v1.AddArg(v3)
@@ -7251,11 +7253,11 @@ func rewriteValueMIPS_OpRsh16x32_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7268,7 +7270,7 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpMIPSSRAconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
@@ -7276,7 +7278,7 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
}
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint32(c) >= 16
- // result: (SRAconst (SLLconst <types.UInt32> x [16]) [31])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7289,7 +7291,7 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
}
v.reset(OpMIPSSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
@@ -7300,28 +7302,28 @@ func rewriteValueMIPS_OpRsh16x64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh16x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh16x8 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -7332,8 +7334,8 @@ func rewriteValueMIPS_OpRsh16x8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -7344,16 +7346,16 @@ func rewriteValueMIPS_OpRsh32Ux16_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -7363,8 +7365,8 @@ func rewriteValueMIPS_OpRsh32Ux16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -7377,10 +7379,10 @@ func rewriteValueMIPS_OpRsh32Ux32_0(v *Value) bool {
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
@@ -7427,8 +7429,8 @@ func rewriteValueMIPS_OpRsh32Ux64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -7439,16 +7441,16 @@ func rewriteValueMIPS_OpRsh32Ux8_0(v *Value) bool {
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
@@ -7458,26 +7460,26 @@ func rewriteValueMIPS_OpRsh32Ux8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x16 x y)
// cond:
- // result: (SRA x ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = -1
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
@@ -7488,22 +7490,22 @@ func rewriteValueMIPS_OpRsh32x16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x32 x y)
// cond:
- // result: (SRA x ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = -1
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v2.AuxInt = 32
v2.AddArg(y)
v0.AddArg(v2)
@@ -7553,26 +7555,26 @@ func rewriteValueMIPS_OpRsh32x64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh32x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x8 x y)
// cond:
- // result: (SRA x ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = -1
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
@@ -7583,8 +7585,8 @@ func rewriteValueMIPS_OpRsh32x8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8Ux16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
@@ -7594,19 +7596,19 @@ func rewriteValueMIPS_OpRsh8Ux16_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7616,8 +7618,8 @@ func rewriteValueMIPS_OpRsh8Ux16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8Ux32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
@@ -7627,15 +7629,15 @@ func rewriteValueMIPS_OpRsh8Ux32_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
v3.AddArg(y)
v.AddArg(v3)
@@ -7645,11 +7647,11 @@ func rewriteValueMIPS_OpRsh8Ux32_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7662,7 +7664,7 @@ func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool {
}
v.reset(OpMIPSSRLconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -7689,8 +7691,8 @@ func rewriteValueMIPS_OpRsh8Ux64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8Ux8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
@@ -7700,19 +7702,19 @@ func rewriteValueMIPS_OpRsh8Ux8_0(v *Value) bool {
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
@@ -7722,28 +7724,28 @@ func rewriteValueMIPS_OpRsh8Ux8_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8x16_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x16 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -7754,24 +7756,24 @@ func rewriteValueMIPS_OpRsh8x16_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8x32_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x32 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = -1
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v3.AuxInt = 32
v3.AddArg(y)
v1.AddArg(v3)
@@ -7782,11 +7784,11 @@ func rewriteValueMIPS_OpRsh8x32_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7799,7 +7801,7 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpMIPSSRAconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -7807,7 +7809,7 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
}
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint32(c) >= 8
- // result: (SRAconst (SLLconst <types.UInt32> x [24]) [31])
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
@@ -7820,7 +7822,7 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
}
v.reset(OpMIPSSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
@@ -7831,28 +7833,28 @@ func rewriteValueMIPS_OpRsh8x64_0(v *Value) bool {
func rewriteValueMIPS_OpRsh8x8_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh8x8 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, types.Int32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, types.UInt32)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, types.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, types.UInt32)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
@@ -7863,8 +7865,8 @@ func rewriteValueMIPS_OpRsh8x8_0(v *Value) bool {
func rewriteValueMIPS_OpSelect0_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Select0 (Add32carry <t> x y))
// cond:
// result: (ADD <t.FieldType(0)> x y)
@@ -7996,7 +7998,7 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool {
v0.AuxInt = -1
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(x)
@@ -8023,7 +8025,7 @@ func rewriteValueMIPS_OpSelect0_0(v *Value) bool {
v0.AuxInt = -1
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(x)
@@ -8169,11 +8171,11 @@ func rewriteValueMIPS_OpSelect0_10(v *Value) bool {
func rewriteValueMIPS_OpSelect1_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Select1 (Add32carry <t> x y))
// cond:
- // result: (SGTU <types.Bool> x (ADD <t.FieldType(0)> x y))
+ // result: (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
for {
v_0 := v.Args[0]
if v_0.Op != OpAdd32carry {
@@ -8183,7 +8185,7 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool {
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpMIPSSGTU)
- v.Type = types.Bool
+ v.Type = typ.Bool
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0))
v0.AddArg(x)
@@ -8193,7 +8195,7 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool {
}
// match: (Select1 (Sub32carry <t> x y))
// cond:
- // result: (SGTU <types.Bool> (SUB <t.FieldType(0)> x y) x)
+ // result: (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
for {
v_0 := v.Args[0]
if v_0.Op != OpSub32carry {
@@ -8203,7 +8205,7 @@ func rewriteValueMIPS_OpSelect1_0(v *Value) bool {
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpMIPSSGTU)
- v.Type = types.Bool
+ v.Type = typ.Bool
v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0))
v0.AddArg(x)
v0.AddArg(y)
@@ -8560,14 +8562,14 @@ func rewriteValueMIPS_OpStaticCall_0(v *Value) bool {
}
func rewriteValueMIPS_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(*types.Type).Size() == 1) {
break
}
v.reset(OpMIPSMOVBstore)
@@ -8577,14 +8579,14 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(*types.Type).Size() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(*types.Type).Size() == 2) {
break
}
v.reset(OpMIPSMOVHstore)
@@ -8594,14 +8596,14 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVWstore)
@@ -8611,14 +8613,14 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVFstore)
@@ -8628,14 +8630,14 @@ func rewriteValueMIPS_OpStore_0(v *Value) bool {
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVDstore)
@@ -8822,8 +8824,8 @@ func rewriteValueMIPS_OpXor8_0(v *Value) bool {
func rewriteValueMIPS_OpZero_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [0] _ mem)
// cond:
// result: mem
@@ -8848,14 +8850,14 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
mem := v.Args[1]
v.reset(OpMIPSMOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 2 {
@@ -8864,12 +8866,12 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
@@ -8887,13 +8889,13 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 1
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -8901,7 +8903,7 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 4 {
@@ -8910,19 +8912,19 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
for {
if v.AuxInt != 4 {
@@ -8931,19 +8933,19 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -8962,25 +8964,25 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 3
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v3.AuxInt = 1
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
@@ -9001,19 +9003,19 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
v.reset(OpMIPSMOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v1.AuxInt = 1
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9022,7 +9024,7 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [6] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(*types.Type).Alignment()%2 == 0
// result: (MOVHstore [4] ptr (MOVWconst [0]) (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)))
for {
if v.AuxInt != 6 {
@@ -9031,25 +9033,25 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(*types.Type).Alignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9058,7 +9060,7 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))
for {
if v.AuxInt != 8 {
@@ -9067,19 +9069,19 @@ func rewriteValueMIPS_OpZero_0(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
@@ -9093,10 +9095,10 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zero [12] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))
for {
if v.AuxInt != 12 {
@@ -9105,25 +9107,25 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
@@ -9132,7 +9134,7 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [16] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(*types.Type).Alignment()%4 == 0
// result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))))
for {
if v.AuxInt != 16 {
@@ -9141,31 +9143,31 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(*types.Type).Alignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
v.AuxInt = 12
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v1.AuxInt = 8
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v2.AuxInt = 0
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v3.AuxInt = 4
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v4.AuxInt = 0
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
@@ -9175,21 +9177,21 @@ func rewriteValueMIPS_OpZero_10(v *Value) bool {
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 16 || t.(Type).Alignment()%4 != 0)
- // result: (LoweredZero [t.(Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 16 || t.(*types.Type).Alignment()%4 != 0)
+ // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(s > 16 || t.(Type).Alignment()%4 != 0) {
+ if !(s > 16 || t.(*types.Type).Alignment()%4 != 0) {
break
}
v.reset(OpMIPSLoweredZero)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(*types.Type).Alignment()
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
v0.AddArg(ptr)
v.AddArg(v0)
v.AddArg(mem)
@@ -9233,17 +9235,17 @@ func rewriteValueMIPS_OpZeroExt8to32_0(v *Value) bool {
func rewriteValueMIPS_OpZeromask_0(v *Value) bool {
b := v.Block
_ = b
- types := &b.Func.Config.Types
- _ = types
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Zeromask x)
// cond:
// result: (NEG (SGTU x (MOVWconst [0])))
for {
x := v.Args[0]
v.reset(OpMIPSNEG)
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, types.Bool)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
v1.AuxInt = 0
v0.AddArg(v1)
v.AddArg(v0)
@@ -9255,8 +9257,8 @@ func rewriteBlockMIPS(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
- types := &config.Types
- _ = types
+ typ := &config.Types
+ _ = typ
switch b.Kind {
case BlockMIPSEQ:
// match: (EQ (FPFlagTrue cmp) yes no)