author    fanzha02 <fannie.zhang@arm.com>  2020-05-14 17:01:11 +0800
committer Keith Randall <khr@golang.org>   2020-08-24 14:38:38 +0000
commit    85902b6786bbe40b297cbbf823f489b07c654bbd
tree      6b7eabd4cdffc97c1af52a680e42b5cd1f100294 /src/cmd/compile/internal/ssa/rewriteARM64.go
parent    0e031676288ddd56fb410b6b27807a180a585db3
cmd/compile: convert the rest of the ARM64.rules lines to typed aux mode
This patch adds the ARM64Bitfield auxInt type to auxIntType() and returns its Go type as arm64BitField, which is defined as an int16. In addition, the Go type of a SymOff auxInt is int32, but some functions (such as min(), areAdjacentOffsets(), and read16/32/64()) take it as an int64 parameter, so this patch adds the needed type conversions in those rules.

Passes toolstash-check -all.

Change-Id: Ib234b48d0a97ef244dd37878e06b5825316dd782
Reviewed-on: https://go-review.googlesource.com/c/go/+/234378
Reviewed-by: Keith Randall <khr@golang.org>
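For context, the typed aux mode replaces raw reads and writes of Value.AuxInt with typed conversion helpers, which is exactly what the mechanical changes below do (and why an int32 SymOff offset now needs an explicit int64(off) before reaching helpers like read8). A minimal, self-contained sketch of that pattern, with simplified stand-in types and helper signatures rather than the real ssa package definitions:

```go
package main

import "fmt"

// Value stands in for ssa.Value; only the AuxInt field matters here.
type Value struct {
	AuxInt int64
}

// arm64BitField mirrors the aux type described above: an int16 value
// carried inside the 64-bit AuxInt field.
type arm64BitField int16

// Typed accessors in the style of the generated rewrite code.
func auxIntToInt64(i int64) int64                 { return i }
func int64ToAuxInt(i int64) int64                 { return i }
func auxIntToArm64BitField(i int64) arm64BitField { return arm64BitField(i) }
func arm64BitFieldToAuxInt(b arm64BitField) int64 { return int64(b) }

func main() {
	v := &Value{}
	// Untyped style (old): v.AuxInt = 8; c := v.AuxInt
	// Typed style (new): every read and write goes through a conversion
	// helper, so the rule generator can check each op's aux type.
	v.AuxInt = int64ToAuxInt(8)
	c := auxIntToInt64(v.AuxInt)
	fmt.Println(c) // 8
}
```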
Diffstat (limited to 'src/cmd/compile/internal/ssa/rewriteARM64.go')
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go  2704
1 file changed, 1352 insertions(+), 1352 deletions(-)
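The hunks below also convert bit-field constants (UBFX, UBFIZ, SBFIZ) to the arm64BitField aux type and compare them via methods such as bfc.getARM64BFlsb() and bfc.getARM64BFwidth(). A rough sketch of such a packed bit-field aux follows; the encoding chosen here (lsb in the high byte, width in the low byte) is an assumption for illustration only, and the real helpers live elsewhere in the ssa package:

```go
package main

import "fmt"

// arm64BitField packs a bit field's lsb and width into one small integer,
// matching the commit message's description of an int16-typed aux.
// The byte layout below is an assumed encoding, not the canonical one.
type arm64BitField int16

func armBFAuxInt(lsb, width int64) arm64BitField {
	return arm64BitField(width | lsb<<8)
}

func (bfc arm64BitField) getARM64BFlsb() int64 {
	return int64(uint16(bfc)) >> 8
}

func (bfc arm64BitField) getARM64BFwidth() int64 {
	return int64(bfc) & 0xff
}

func main() {
	bfc := armBFAuxInt(8, 8) // as in the (UBFX [armBFAuxInt(8, 8)] x) rules below
	fmt.Println(bfc.getARM64BFlsb(), bfc.getARM64BFwidth()) // 8 8
}
```

With this shape, a rule condition like bfc == armBFAuxInt(32-c, c) compares typed bit-field values instead of raw int64 aux bits.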
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 8655880b38..453578aa9a 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -1229,13 +1229,13 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ADDshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -1251,13 +1251,13 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ADDshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -1273,13 +1273,13 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ADDshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -1300,14 +1300,14 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
@@ -1323,15 +1323,15 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -1340,11 +1340,11 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
@@ -1370,14 +1370,14 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
@@ -1393,15 +1393,15 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -1410,11 +1410,11 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
@@ -1438,14 +1438,14 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
@@ -1462,15 +1462,15 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -1479,11 +1479,11 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
@@ -1513,14 +1513,14 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
@@ -1536,15 +1536,15 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -1553,11 +1553,11 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
@@ -1650,16 +1650,16 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
// match: (ADDshiftLL (MOVDconst [c]) x [d])
// result: (ADDconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ADDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1667,22 +1667,22 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
// match: (ADDshiftLL x (MOVDconst [c]) [d])
// result: (ADDconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ADDconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// match: (ADDshiftLL [c] (SRLconst x [64-c]) x)
// result: (RORconst [64-c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
@@ -1690,7 +1690,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg(x)
return true
}
@@ -1699,24 +1699,24 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
// result: (RORWconst [32-c] x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg(x)
return true
}
// match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// result: (REV16W x)
for {
- if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
@@ -1730,14 +1730,14 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
// match: (ADDshiftLL [c] (SRLconst x [64-c]) x2)
// result: (EXTRconst [64-c] x2 x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
x2 := v_1
v.reset(OpARM64EXTRconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg2(x2, x)
return true
}
@@ -1746,18 +1746,18 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
x2 := v_1
if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg2(x2, x)
return true
}
@@ -1770,16 +1770,16 @@ func rewriteValueARM64_OpARM64ADDshiftRA(v *Value) bool {
// match: (ADDshiftRA (MOVDconst [c]) x [d])
// result: (ADDconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ADDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1787,14 +1787,14 @@ func rewriteValueARM64_OpARM64ADDshiftRA(v *Value) bool {
// match: (ADDshiftRA x (MOVDconst [c]) [d])
// result: (ADDconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ADDconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
@@ -1807,16 +1807,16 @@ func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
// match: (ADDshiftRL (MOVDconst [c]) x [d])
// result: (ADDconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ADDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -1824,22 +1824,22 @@ func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
// match: (ADDshiftRL x (MOVDconst [c]) [d])
// result: (ADDconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ADDconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// match: (ADDshiftRL [c] (SLLconst x [64-c]) x)
// result: (RORconst [ c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
@@ -1847,7 +1847,7 @@ func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
@@ -1856,8 +1856,8 @@ func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
// result: (RORWconst [c] x)
for {
t := v.Type
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
break
}
x := v_0.Args[0]
@@ -1865,7 +1865,7 @@ func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
@@ -1925,13 +1925,13 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool {
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ANDshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -1947,13 +1947,13 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool {
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ANDshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -1969,13 +1969,13 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool {
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ANDshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2074,17 +2074,17 @@ func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
// cond: isARM64BFMask(sc, ac, sc)
// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
- ac := v.AuxInt
+ ac := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, sc)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
v.AddArg(x)
return true
}
@@ -2092,17 +2092,17 @@ func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
// cond: isARM64BFMask(sc, ac, 0)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
for {
- ac := v.AuxInt
+ ac := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
v.AddArg(x)
return true
}
@@ -2115,16 +2115,16 @@ func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool {
// match: (ANDshiftLL (MOVDconst [c]) x [d])
// result: (ANDconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ANDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2132,14 +2132,14 @@ func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool {
// match: (ANDshiftLL x (MOVDconst [c]) [d])
// result: (ANDconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -2147,13 +2147,13 @@ func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool {
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SLLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
@@ -2169,16 +2169,16 @@ func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool {
// match: (ANDshiftRA (MOVDconst [c]) x [d])
// result: (ANDconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ANDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2186,14 +2186,14 @@ func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool {
// match: (ANDshiftRA x (MOVDconst [c]) [d])
// result: (ANDconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
@@ -2201,13 +2201,13 @@ func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool {
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SRAconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
@@ -2223,16 +2223,16 @@ func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool {
// match: (ANDshiftRL (MOVDconst [c]) x [d])
// result: (ANDconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ANDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2240,14 +2240,14 @@ func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool {
// match: (ANDshiftRL x (MOVDconst [c]) [d])
// result: (ANDconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -2255,13 +2255,13 @@ func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool {
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SRLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
@@ -2306,13 +2306,13 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool {
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64BICshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2325,13 +2325,13 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool {
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64BICshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2344,13 +2344,13 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool {
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64BICshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2362,14 +2362,14 @@ func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool {
// match: (BICshiftLL x (MOVDconst [c]) [d])
// result: (ANDconst x [^int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = ^int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -2377,17 +2377,17 @@ func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
@@ -2398,14 +2398,14 @@ func rewriteValueARM64_OpARM64BICshiftRA(v *Value) bool {
// match: (BICshiftRA x (MOVDconst [c]) [d])
// result: (ANDconst x [^(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = ^(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
v.AddArg(x)
return true
}
@@ -2413,17 +2413,17 @@ func rewriteValueARM64_OpARM64BICshiftRA(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
@@ -2434,14 +2434,14 @@ func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool {
// match: (BICshiftRL x (MOVDconst [c]) [d])
// result: (ANDconst x [^int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -2449,17 +2449,17 @@ func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
@@ -2493,13 +2493,13 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool {
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64CMNshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2515,13 +2515,13 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool {
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64CMNshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2537,13 +2537,13 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool {
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64CMNshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2611,16 +2611,16 @@ func rewriteValueARM64_OpARM64CMNshiftLL(v *Value) bool {
// match: (CMNshiftLL (MOVDconst [c]) x [d])
// result: (CMNconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64CMNconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2628,14 +2628,14 @@ func rewriteValueARM64_OpARM64CMNshiftLL(v *Value) bool {
// match: (CMNshiftLL x (MOVDconst [c]) [d])
// result: (CMNconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMNconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -2648,16 +2648,16 @@ func rewriteValueARM64_OpARM64CMNshiftRA(v *Value) bool {
// match: (CMNshiftRA (MOVDconst [c]) x [d])
// result: (CMNconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64CMNconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2665,14 +2665,14 @@ func rewriteValueARM64_OpARM64CMNshiftRA(v *Value) bool {
// match: (CMNshiftRA x (MOVDconst [c]) [d])
// result: (CMNconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMNconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
@@ -2685,16 +2685,16 @@ func rewriteValueARM64_OpARM64CMNshiftRL(v *Value) bool {
// match: (CMNshiftRL (MOVDconst [c]) x [d])
// result: (CMNconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64CMNconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -2702,14 +2702,14 @@ func rewriteValueARM64_OpARM64CMNshiftRL(v *Value) bool {
// match: (CMNshiftRL x (MOVDconst [c]) [d])
// result: (CMNconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMNconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -2771,13 +2771,13 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64CMPshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2789,7 +2789,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
if x0.Op != OpARM64SLLconst {
break
}
- c := x0.AuxInt
+ c := auxIntToInt64(x0.AuxInt)
y := x0.Args[0]
x1 := v_1
if !(clobberIfDead(x0)) {
@@ -2797,7 +2797,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
}
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg2(x1, y)
v.AddArg(v0)
return true
@@ -2811,13 +2811,13 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64CMPshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2829,7 +2829,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
if x0.Op != OpARM64SRLconst {
break
}
- c := x0.AuxInt
+ c := auxIntToInt64(x0.AuxInt)
y := x0.Args[0]
x1 := v_1
if !(clobberIfDead(x0)) {
@@ -2837,7 +2837,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
}
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg2(x1, y)
v.AddArg(v0)
return true
@@ -2851,13 +2851,13 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64CMPshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -2869,7 +2869,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
if x0.Op != OpARM64SRAconst {
break
}
- c := x0.AuxInt
+ c := auxIntToInt64(x0.AuxInt)
y := x0.Args[0]
x1 := v_1
if !(clobberIfDead(x0)) {
@@ -2877,7 +2877,7 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
}
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg2(x1, y)
v.AddArg(v0)
return true
@@ -3064,17 +3064,17 @@ func rewriteValueARM64_OpARM64CMPshiftLL(v *Value) bool {
// match: (CMPshiftLL (MOVDconst [c]) x [d])
// result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v1.AuxInt = d
+ v1.AuxInt = int64ToAuxInt(d)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
@@ -3083,14 +3083,14 @@ func rewriteValueARM64_OpARM64CMPshiftLL(v *Value) bool {
// match: (CMPshiftLL x (MOVDconst [c]) [d])
// result: (CMPconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMPconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -3103,17 +3103,17 @@ func rewriteValueARM64_OpARM64CMPshiftRA(v *Value) bool {
// match: (CMPshiftRA (MOVDconst [c]) x [d])
// result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v1.AuxInt = d
+ v1.AuxInt = int64ToAuxInt(d)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
@@ -3122,14 +3122,14 @@ func rewriteValueARM64_OpARM64CMPshiftRA(v *Value) bool {
// match: (CMPshiftRA x (MOVDconst [c]) [d])
// result: (CMPconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMPconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
@@ -3142,17 +3142,17 @@ func rewriteValueARM64_OpARM64CMPshiftRL(v *Value) bool {
// match: (CMPshiftRL (MOVDconst [c]) x [d])
// result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v1.AuxInt = d
+ v1.AuxInt = int64ToAuxInt(d)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
@@ -3161,14 +3161,14 @@ func rewriteValueARM64_OpARM64CMPshiftRL(v *Value) bool {
// match: (CMPshiftRL x (MOVDconst [c]) [d])
// result: (CMPconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMPconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -3444,13 +3444,13 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool {
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64EONshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -3463,13 +3463,13 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool {
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64EONshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -3482,13 +3482,13 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool {
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64EONshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -3500,14 +3500,14 @@ func rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool {
// match: (EONshiftLL x (MOVDconst [c]) [d])
// result: (XORconst x [^int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = ^int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -3515,17 +3515,17 @@ func rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
@@ -3536,14 +3536,14 @@ func rewriteValueARM64_OpARM64EONshiftRA(v *Value) bool {
// match: (EONshiftRA x (MOVDconst [c]) [d])
// result: (XORconst x [^(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = ^(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
v.AddArg(x)
return true
}
@@ -3551,17 +3551,17 @@ func rewriteValueARM64_OpARM64EONshiftRA(v *Value) bool {
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
@@ -3572,14 +3572,14 @@ func rewriteValueARM64_OpARM64EONshiftRL(v *Value) bool {
// match: (EONshiftRL x (MOVDconst [c]) [d])
// result: (XORconst x [^int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -3587,17 +3587,17 @@ func rewriteValueARM64_OpARM64EONshiftRL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
@@ -6266,15 +6266,15 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
}
// match: (MOVBUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVDconst [int64(read8(sym, off))])
+ // result: (MOVDconst [int64(read8(sym, int64(off)))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64(read8(sym, off))
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
return true
}
return false
@@ -6417,13 +6417,13 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<8-1, sc)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
v.AddArg(x)
return true
}
@@ -6434,13 +6434,13 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<8-1, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, 8)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
v.AddArg(x)
return true
}
@@ -6648,13 +6648,13 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc < 8) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(lc, 8-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
v.AddArg(x)
return true
}
@@ -6853,15 +6853,15 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -6870,8 +6870,8 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
@@ -6879,10 +6879,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -6892,7 +6892,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
continue
}
w := v_1.Args[0]
@@ -6916,15 +6916,15 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -6933,8 +6933,8 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
@@ -6942,10 +6942,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -6955,7 +6955,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
continue
}
w := v_1.Args[0]
@@ -6979,15 +6979,15 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -6996,8 +6996,8 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
@@ -7005,10 +7005,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -7018,7 +7018,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
continue
}
w := v_1.Args[0]
@@ -7042,10 +7042,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
break
}
v_1_0 := v_1.Args[0]
@@ -7054,7 +7054,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
w := v_1_0.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -7063,8 +7063,8 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
@@ -7072,10 +7072,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -7085,7 +7085,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
continue
}
v_1_0 := v_1.Args[0]
@@ -7113,27 +7113,27 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
ptr1 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
@@ -7141,10 +7141,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -7157,7 +7157,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
if v_1.Op != OpARM64SRLconst {
continue
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVBstoreidx {
@@ -7167,7 +7167,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
@@ -7177,19 +7177,19 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
// match: (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
- // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x)
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64UBFX {
break
}
- bfc := v_1.AuxInt
+ bfc := auxIntToArm64BitField(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -7198,24 +7198,24 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
if w0.Op != OpARM64UBFX {
break
}
- bfc2 := w0.AuxInt
- if w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) {
+ bfc2 := auxIntToArm64BitField(w0.AuxInt)
+ if w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
// match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
- // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x)
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -7228,7 +7228,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
if v_1.Op != OpARM64UBFX {
continue
}
- bfc := v_1.AuxInt
+ bfc := auxIntToArm64BitField(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVBstoreidx {
@@ -7241,8 +7241,8 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
if w0.Op != OpARM64UBFX {
continue
}
- bfc2 := w0.AuxInt
- if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) {
+ bfc2 := auxIntToArm64BitField(w0.AuxInt)
+ if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
@@ -7255,26 +7255,26 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64MOVDreg {
break
}
w := v_1_0.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
ptr1 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
break
}
w0_0 := w0.Args[0]
@@ -7282,8 +7282,8 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
@@ -7291,10 +7291,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -7307,7 +7307,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
if v_1.Op != OpARM64SRLconst {
continue
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64MOVDreg {
continue
@@ -7321,7 +7321,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
continue
}
w0_0 := w0.Args[0]
@@ -7338,12 +7338,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
@@ -7351,11 +7351,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
@@ -7363,11 +7363,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
@@ -7375,11 +7375,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] {
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
break
}
x3 := x2.Args[2]
- if x3.Op != OpARM64MOVBstore || x3.AuxInt != i-4 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
@@ -7387,11 +7387,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64SRLconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+ if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x4 := x3.Args[2]
- if x4.Op != OpARM64MOVBstore || x4.AuxInt != i-5 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != i-5 || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
@@ -7399,11 +7399,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64SRLconst || x4_1.AuxInt != 40 || w != x4_1.Args[0] {
+ if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
break
}
x5 := x4.Args[2]
- if x5.Op != OpARM64MOVBstore || x5.AuxInt != i-6 || x5.Aux != s {
+ if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != i-6 || auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
@@ -7411,11 +7411,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x5_1 := x5.Args[1]
- if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] {
+ if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
break
}
x6 := x5.Args[2]
- if x6.Op != OpARM64MOVBstore || x6.AuxInt != i-7 || x6.Aux != s {
+ if x6.Op != OpARM64MOVBstore || auxIntToInt32(x6.AuxInt) != i-7 || auxToSym(x6.Aux) != s {
break
}
mem := x6.Args[2]
@@ -7423,12 +7423,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x6_1 := x6.Args[1]
- if x6_1.Op != OpARM64SRLconst || x6_1.AuxInt != 56 || w != x6_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ if x6_1.Op != OpARM64SRLconst || auxIntToInt64(x6_1.AuxInt) != 56 || w != x6_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
break
}
v.reset(OpARM64MOVDstore)
- v.AuxInt = i - 7
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 7)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
@@ -7438,14 +7438,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
for {
- if v.AuxInt != 7 {
+ if auxIntToInt32(v.AuxInt) != 7 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != 6 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 6 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
@@ -7453,11 +7453,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != 5 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
@@ -7465,11 +7465,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != 4 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != 4 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
@@ -7477,11 +7477,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] {
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
break
}
x3 := x2.Args[2]
- if x3.Op != OpARM64MOVBstore || x3.AuxInt != 3 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
@@ -7489,11 +7489,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64SRLconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+ if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x4 := x3.Args[2]
- if x4.Op != OpARM64MOVBstore || x4.AuxInt != 2 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != 2 || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
@@ -7501,11 +7501,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64SRLconst || x4_1.AuxInt != 40 || w != x4_1.Args[0] {
+ if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
break
}
x5 := x4.Args[2]
- if x5.Op != OpARM64MOVBstore || x5.AuxInt != 1 || x5.Aux != s {
+ if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != 1 || auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
@@ -7520,7 +7520,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr1 := p1_0
idx1 := p1_1
x5_1 := x5.Args[1]
- if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] {
+ if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
continue
}
x6 := x5.Args[2]
@@ -7531,7 +7531,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr0 := x6.Args[0]
idx0 := x6.Args[1]
x6_2 := x6.Args[2]
- if x6_2.Op != OpARM64SRLconst || x6_2.AuxInt != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ if x6_2.Op != OpARM64SRLconst || auxIntToInt64(x6_2.AuxInt) != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
continue
}
v.reset(OpARM64MOVDstoreidx)
@@ -7546,12 +7546,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
@@ -7559,11 +7559,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64UBFX || x0_1.AuxInt != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
@@ -7571,11 +7571,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
break
}
mem := x2.Args[2]
@@ -7583,12 +7583,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64UBFX || x2_1.AuxInt != armBFAuxInt(24, 8) || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x2_1.Op != OpARM64UBFX || auxIntToArm64BitField(x2_1.AuxInt) != armBFAuxInt(24, 8) || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 3
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
@@ -7598,14 +7598,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
for {
- if v.AuxInt != 3 {
+ if auxIntToInt32(v.AuxInt) != 3 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
@@ -7613,11 +7613,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64UBFX || x0_1.AuxInt != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
@@ -7632,7 +7632,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr1 := p1_0
idx1 := p1_1
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
continue
}
x2 := x1.Args[2]
@@ -7643,7 +7643,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr0 := x2.Args[0]
idx0 := x2.Args[1]
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
continue
}
v.reset(OpARM64MOVWstoreidx)
@@ -7658,12 +7658,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
@@ -7671,7 +7671,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
break
}
x0_1_0 := x0_1.Args[0]
@@ -7679,7 +7679,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
@@ -7687,7 +7687,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
break
}
x1_1_0 := x1_1.Args[0]
@@ -7695,7 +7695,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
break
}
mem := x2.Args[2]
@@ -7703,7 +7703,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 {
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 {
break
}
x2_1_0 := x2_1.Args[0]
@@ -7711,8 +7711,8 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 3
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
@@ -7722,14 +7722,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
for {
- if v.AuxInt != 3 {
+ if auxIntToInt32(v.AuxInt) != 3 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
@@ -7737,7 +7737,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
break
}
x0_1_0 := x0_1.Args[0]
@@ -7745,7 +7745,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
@@ -7760,7 +7760,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr1 := p1_0
idx1 := p1_1
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
continue
}
x1_1_0 := x1_1.Args[0]
@@ -7775,7 +7775,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr0 := x2.Args[0]
idx0 := x2.Args[1]
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 {
+ if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 {
continue
}
x2_2_0 := x2_2.Args[0]
@@ -7794,12 +7794,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
@@ -7807,11 +7807,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
@@ -7819,11 +7819,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
break
}
mem := x2.Args[2]
@@ -7831,12 +7831,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 3
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
@@ -7846,14 +7846,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
for {
- if v.AuxInt != 3 {
+ if auxIntToInt32(v.AuxInt) != 3 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
@@ -7861,11 +7861,11 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
@@ -7880,7 +7880,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr1 := p1_0
idx1 := p1_1
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
continue
}
x2 := x1.Args[2]
@@ -7891,7 +7891,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr0 := x2.Args[0]
idx0 := x2.Args[1]
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
continue
}
v.reset(OpARM64MOVWstoreidx)
@@ -7906,12 +7906,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -7919,12 +7919,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64SRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
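The REV16W form of this rule relies on ARM64 being little-endian: storing w>>8 at i-1 and w at i leaves the same bytes in memory as one halfword store of the byte-swapped low 16 bits of w at i-1. A small self-contained sketch of that equivalence (bits.ReverseBytes16 stands in for what REV16W computes in its low halfword; this is an illustration, not the compiler's code):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	w := uint32(0x11223344)
	i := 4

	// Left-hand side of the rule: two one-byte stores.
	a := make([]byte, 8)
	a[i-1] = byte(w >> 8) // MOVBstore [i-1] {s} ptr (SRLconst [8] w)
	a[i] = byte(w)        // MOVBstore [i]   {s} ptr w

	// Right-hand side: one little-endian halfword store at i-1 of the
	// byte-reversed low 16 bits of w, i.e. what REV16W leaves there.
	b := make([]byte, 8)
	binary.LittleEndian.PutUint16(b[i-1:], bits.ReverseBytes16(uint16(w)))

	fmt.Println(bytes.Equal(a, b)) // true
}

The wider REV/REVW variants elsewhere in this function follow the same pattern for 8 and 4 merged byte stores.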
@@ -7934,10 +7934,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -7956,7 +7956,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr0 := x.Args[0]
idx0 := x.Args[1]
x_2 := x.Args[2]
- if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
@@ -7971,12 +7971,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -7984,12 +7984,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64UBFX || x_1.AuxInt != armBFAuxInt(8, 8) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 8) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
@@ -7999,10 +7999,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -8021,7 +8021,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr0 := x.Args[0]
idx0 := x.Args[1]
x_2 := x.Args[2]
- if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
@@ -8036,12 +8036,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -8049,7 +8049,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64SRLconst || x_1.AuxInt != 8 {
+ if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 {
break
}
x_1_0 := x_1.Args[0]
@@ -8057,8 +8057,8 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
@@ -8068,10 +8068,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -8090,7 +8090,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr0 := x.Args[0]
idx0 := x.Args[1]
x_2 := x.Args[2]
- if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 {
+ if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 {
continue
}
x_2_0 := x_2.Args[0]
@@ -8109,12 +8109,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -8122,12 +8122,12 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64UBFX || x_1.AuxInt != armBFAuxInt(8, 24) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 24) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
@@ -8137,10 +8137,10 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -8159,7 +8159,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
ptr0 := x.Args[0]
idx0 := x.Args[1]
x_2 := x.Args[2]
- if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
@@ -8318,11 +8318,11 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
// result: (MOVHstoreidx ptr idx w mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
break
}
idx := v_1.Args[0]
- if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 8 {
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 8 {
break
}
w := v_2.Args[0]
@@ -8343,7 +8343,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
// result: (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 3 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 3 {
break
}
idx := v_1.Args[0]
@@ -8357,11 +8357,11 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 2 || idx != x0_1.Args[0] {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 || idx != x0_1.Args[0] {
break
}
x0_2 := x0.Args[2]
- if x0_2.Op != OpARM64UBFX || x0_2.AuxInt != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+ if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
break
}
x1 := x0.Args[3]
@@ -8373,11 +8373,11 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] {
break
}
x1_2 := x1.Args[2]
- if x1_2.Op != OpARM64UBFX || x1_2.AuxInt != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+ if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
break
}
x2 := x1.Args[3]
@@ -8389,7 +8389,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpARM64MOVWstoreidx)
@@ -8414,11 +8414,11 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 1 || idx != x0_1.Args[0] {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 || idx != x0_1.Args[0] {
break
}
x0_2 := x0.Args[2]
- if x0_2.Op != OpARM64UBFX || x0_2.AuxInt != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+ if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
break
}
x1 := x0.Args[3]
@@ -8430,11 +8430,11 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] {
break
}
x1_2 := x1.Args[2]
- if x1_2.Op != OpARM64UBFX || x1_2.AuxInt != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+ if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
break
}
x2 := x1.Args[3]
@@ -8446,11 +8446,11 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 3 || idx != x2_1.Args[0] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] {
break
}
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpARM64MOVWstoreidx)
@@ -8462,7 +8462,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
// result: (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
break
}
idx := v_1.Args[0]
@@ -8476,7 +8476,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
x_2 := x.Args[2]
- if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstoreidx)
@@ -8501,11 +8501,11 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64ADDconst || x_1.AuxInt != 1 || idx != x_1.Args[0] {
+ if x_1.Op != OpARM64ADDconst || auxIntToInt64(x_1.AuxInt) != 1 || idx != x_1.Args[0] {
break
}
x_2 := x.Args[2]
- if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstoreidx)
@@ -8582,28 +8582,28 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
return true
}
// match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
- // cond: x.Uses == 1 && areAdjacentOffsets(i,j,1) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
- // result: (MOVHstorezero [min(i,j)] {s} ptr0 mem)
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),1) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
x := v_1
if x.Op != OpARM64MOVBstorezero {
break
}
- j := x.AuxInt
- if x.Aux != s {
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
ptr1 := x.Args[0]
- if !(x.Uses == 1 && areAdjacentOffsets(i, j, 1) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 1) && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVHstorezero)
- v.AuxInt = min(i, j)
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
v.AddArg2(ptr0, mem)
return true
}
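The int64(i)/int64(j) conversions introduced in this rule come from the typed aux values: the store offsets are now int32, while min and areAdjacentOffsets work on int64. A minimal, hypothetical sketch of the conversion helpers involved (the real ones live in rewrite.go and may differ in detail), just to show the round trip:

package main

import "fmt"

// Hypothetical stand-ins for the typed-aux helpers used throughout this file:
// AuxInt stays an int64 underneath, the helpers only convert to/from the
// typed view the generated matcher works with.
func auxIntToInt32(i int64) int32 { return int32(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }

// min operates on int64, which is why the rewritten cond/result spell out
// int64(i), int64(j) and then narrow the result back with int32(...).
func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

func main() {
	i, j := int32(8), int32(9) // offsets of two adjacent MOVBstorezero ops
	aux := int32ToAuxInt(int32(min(int64(i), int64(j))))
	fmt.Println(auxIntToInt32(aux)) // 8
}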
@@ -8611,10 +8611,10 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstorezeroidx ptr1 idx1 mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -8687,7 +8687,7 @@ func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool {
// result: (MOVHstorezeroidx ptr idx mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
break
}
idx := v_1.Args[0]
@@ -8830,15 +8830,15 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
}
// match: (MOVDload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVDconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64(read64(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
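The int64(off) conversion in the rule above exists for the same reason: the typed offset is an int32, while the read helpers take an int64 offset. A rough, self-contained sketch of the idea (read64 here is a stand-in that reads from a byte slice, not the real helper, which takes the symbol itself):

package main

import (
	"encoding/binary"
	"fmt"
)

// Stand-in for read64: fetch a 64-bit value from a read-only symbol's data at
// the given offset, in the target's byte order.
func read64(data []byte, off int64, bo binary.ByteOrder) uint64 {
	return bo.Uint64(data[off:])
}

func main() {
	data := make([]byte, 16)
	binary.LittleEndian.PutUint64(data[8:], 0xcafef00d)

	off := int32(8) // the typed aux offset is an int32 ...
	// ... so the generated matcher widens it before the int64-offset call:
	fmt.Printf("%#x\n", int64(read64(data, int64(off), binary.LittleEndian)))
}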
@@ -9336,28 +9336,28 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
return true
}
// match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
- // cond: x.Uses == 1 && areAdjacentOffsets(i,j,8) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
- // result: (MOVQstorezero [min(i,j)] {s} ptr0 mem)
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),8) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
x := v_1
if x.Op != OpARM64MOVDstorezero {
break
}
- j := x.AuxInt
- if x.Aux != s {
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
ptr1 := x.Args[0]
- if !(x.Uses == 1 && areAdjacentOffsets(i, j, 8) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 8) && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVQstorezero)
- v.AuxInt = min(i, j)
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
v.AddArg2(ptr0, mem)
return true
}
@@ -9365,10 +9365,10 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVQstorezero [0] {s} p0 mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt32(v.AuxInt) != 8 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p0 := v_0
if p0.Op != OpARM64ADD {
break
@@ -9390,8 +9390,8 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
continue
}
v.reset(OpARM64MOVQstorezero)
- v.AuxInt = 0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(s)
v.AddArg2(p0, mem)
return true
}
@@ -9401,12 +9401,12 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVQstorezero [0] {s} p0 mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt32(v.AuxInt) != 8 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p0 := v_0
- if p0.Op != OpARM64ADDshiftLL || p0.AuxInt != 3 {
+ if p0.Op != OpARM64ADDshiftLL || auxIntToInt64(p0.AuxInt) != 3 {
break
}
idx0 := p0.Args[1]
@@ -9422,8 +9422,8 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
break
}
v.reset(OpARM64MOVQstorezero)
- v.AuxInt = 0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(s)
v.AddArg2(p0, mem)
return true
}
@@ -9629,15 +9629,15 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
}
// match: (MOVHUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVDconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
@@ -9899,13 +9899,13 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<16-1, sc)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
v.AddArg(x)
return true
}
@@ -9916,13 +9916,13 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<16-1, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, 16)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
v.AddArg(x)
return true
}
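The UBFIZ/UBFX rules above now wrap the (lsb, width) pair in an arm64BitField aux value via armBFAuxInt, arm64BitFieldToAuxInt and auxIntToArm64BitField. A hypothetical sketch of such a packing (the real type and helpers are defined in rewrite.go and may differ):

package main

import "fmt"

// Hypothetical stand-in for the arm64BitField aux type: a small integer that
// packs lsb in the high byte and width in the low byte.
type arm64BitField int16

func armBFAuxInt(lsb, width int64) arm64BitField {
	return arm64BitField(width | lsb<<8)
}

func arm64BitFieldToAuxInt(bf arm64BitField) int64 { return int64(bf) }
func auxIntToArm64BitField(i int64) arm64BitField  { return arm64BitField(i) }

func main() {
	// e.g. (MOVHUreg (SRLconst [sc] x)) => (UBFX [armBFAuxInt(sc, 16)] x)
	bf := armBFAuxInt(8, 16)
	fmt.Println(auxIntToArm64BitField(arm64BitFieldToAuxInt(bf)) == bf) // true
}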
@@ -10313,13 +10313,13 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc < 16) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(lc, 16-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
v.AddArg(x)
return true
}
@@ -10504,15 +10504,15 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -10521,8 +10521,8 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
@@ -10530,10 +10530,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -10543,7 +10543,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
continue
}
w := v_1.Args[0]
@@ -10567,16 +10567,16 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
ptr0 := v_0.Args[0]
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
@@ -10592,7 +10592,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
}
v.reset(OpARM64MOVWstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w, mem)
return true
@@ -10601,15 +10601,15 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -10618,8 +10618,8 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
@@ -10627,10 +10627,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -10640,7 +10640,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
continue
}
w := v_1.Args[0]
@@ -10664,16 +10664,16 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
ptr0 := v_0.Args[0]
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
break
}
w := v_1.Args[0]
@@ -10689,7 +10689,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
}
v.reset(OpARM64MOVWstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w, mem)
return true
@@ -10698,10 +10698,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
v_1_0 := v_1.Args[0]
@@ -10710,7 +10710,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
}
w := v_1_0.Args[0]
x := v_2
- if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -10719,8 +10719,8 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
@@ -10728,10 +10728,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -10741,7 +10741,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
continue
}
v_1_0 := v_1.Args[0]
@@ -10769,16 +10769,16 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
ptr0 := v_0.Args[0]
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
v_1_0 := v_1.Args[0]
@@ -10798,7 +10798,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
}
v.reset(OpARM64MOVWstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w, mem)
return true
@@ -10807,27 +10807,27 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
ptr1 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
@@ -10835,10 +10835,10 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -10851,7 +10851,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
if v_1.Op != OpARM64SRLconst {
continue
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVHstoreidx {
@@ -10861,7 +10861,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVWstoreidx)
@@ -10874,11 +10874,11 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
@@ -10886,7 +10886,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVHstoreidx2 {
@@ -10896,12 +10896,12 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
break
}
v.reset(OpARM64MOVWstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w0, mem)
return true
@@ -11087,11 +11087,11 @@ func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool {
// result: (MOVWstoreidx ptr idx w mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 2 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
break
}
idx := v_1.Args[0]
- if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 16 {
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 16 {
break
}
w := v_2.Args[0]
@@ -11291,28 +11291,28 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
return true
}
// match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
- // cond: x.Uses == 1 && areAdjacentOffsets(i,j,2) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
- // result: (MOVWstorezero [min(i,j)] {s} ptr0 mem)
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),2) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
x := v_1
if x.Op != OpARM64MOVHstorezero {
break
}
- j := x.AuxInt
- if x.Aux != s {
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
ptr1 := x.Args[0]
- if !(x.Uses == 1 && areAdjacentOffsets(i, j, 2) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 2) && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVWstorezero)
- v.AuxInt = min(i, j)
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
v.AddArg2(ptr0, mem)
return true
}
@@ -11320,10 +11320,10 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstorezeroidx ptr1 idx1 mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -11353,11 +11353,11 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
@@ -11374,7 +11374,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
}
v.reset(OpARM64MOVWstorezeroidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg3(ptr1, v0, mem)
return true
@@ -11484,7 +11484,7 @@ func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool {
// result: (MOVWstorezeroidx ptr idx mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 2 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
break
}
idx := v_1.Args[0]
@@ -11701,15 +11701,15 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
}
// match: (MOVWUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVDconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64(read32(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
@@ -11996,13 +11996,13 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<32-1, sc)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
v.AddArg(x)
return true
}
@@ -12013,13 +12013,13 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<32-1, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, 32)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
v.AddArg(x)
return true
}
@@ -12468,13 +12468,13 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc < 32) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(lc, 32-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
v.AddArg(x)
return true
}
@@ -12642,15 +12642,15 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVDstore [i-4] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVWstore || x.AuxInt != i-4 || x.Aux != s {
+ if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
@@ -12659,8 +12659,8 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
break
}
v.reset(OpARM64MOVDstore)
- v.AuxInt = i - 4
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
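The MOVWstore rule above pairs a store of w's upper 32 bits (SRLconst [32] w) at offset i with an earlier store of w at i-4 and emits one 64-bit store at i-4. On a little-endian target the two leave identical memory; a sketch with an invented offset and value:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	w := uint64(0x1122334455667788)
	i := int64(12) // example offset of the upper-half store

	a := make([]byte, 24)
	b := make([]byte, 24)

	// MOVWstore [i-4] w  followed by  MOVWstore [i] (SRLconst [32] w):
	binary.LittleEndian.PutUint32(a[i-4:], uint32(w))
	binary.LittleEndian.PutUint32(a[i:], uint32(w>>32))

	// ...is the same memory state as MOVDstore [i-4] w:
	binary.LittleEndian.PutUint64(b[i-4:], w)

	fmt.Println(bytes.Equal(a, b)) // true
}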
@@ -12668,10 +12668,10 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVDstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -12681,7 +12681,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
continue
}
w := v_1.Args[0]
@@ -12705,16 +12705,16 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
break
}
idx0 := v_0.Args[1]
ptr0 := v_0.Args[0]
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
break
}
w := v_1.Args[0]
@@ -12730,7 +12730,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
}
v.reset(OpARM64MOVDstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 2
+ v0.AuxInt = int64ToAuxInt(2)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w, mem)
return true
@@ -12739,27 +12739,27 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVDstore [i-4] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVWstore || x.AuxInt != i-4 || x.Aux != s {
+ if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
ptr1 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVDstore)
- v.AuxInt = i - 4
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
@@ -12767,10 +12767,10 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVDstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -12783,7 +12783,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
if v_1.Op != OpARM64SRLconst {
continue
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVWstoreidx {
@@ -12793,7 +12793,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVDstoreidx)
@@ -12806,11 +12806,11 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
break
}
idx0 := v_0.Args[1]
@@ -12818,7 +12818,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVWstoreidx4 {
@@ -12828,12 +12828,12 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
break
}
v.reset(OpARM64MOVDstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 2
+ v0.AuxInt = int64ToAuxInt(2)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w0, mem)
return true
@@ -12957,11 +12957,11 @@ func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool {
// result: (MOVDstoreidx ptr idx w mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 4 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
break
}
idx := v_1.Args[0]
- if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 32 {
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 32 {
break
}
w := v_2.Args[0]
@@ -13133,28 +13133,28 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
return true
}
// match: (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
- // cond: x.Uses == 1 && areAdjacentOffsets(i,j,4) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
- // result: (MOVDstorezero [min(i,j)] {s} ptr0 mem)
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),4) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
x := v_1
if x.Op != OpARM64MOVWstorezero {
break
}
- j := x.AuxInt
- if x.Aux != s {
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
ptr1 := x.Args[0]
- if !(x.Uses == 1 && areAdjacentOffsets(i, j, 4) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 4) && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVDstorezero)
- v.AuxInt = min(i, j)
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
v.AddArg2(ptr0, mem)
return true
}
@@ -13162,10 +13162,10 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVDstorezeroidx ptr1 idx1 mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
@@ -13195,11 +13195,11 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
break
}
idx0 := v_0.Args[1]
@@ -13216,7 +13216,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
}
v.reset(OpARM64MOVDstorezeroidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 2
+ v0.AuxInt = int64ToAuxInt(2)
v0.AddArg(idx1)
v.AddArg3(ptr1, v0, mem)
return true
@@ -13294,7 +13294,7 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool {
// result: (MOVDstorezeroidx ptr idx mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 4 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
break
}
idx := v_1.Args[0]
@@ -14668,13 +14668,13 @@ func rewriteValueARM64_OpARM64MVN(v *Value) bool {
if x.Op != OpARM64SLLconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64MVNshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
@@ -14686,13 +14686,13 @@ func rewriteValueARM64_OpARM64MVN(v *Value) bool {
if x.Op != OpARM64SRLconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64MVNshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
@@ -14704,13 +14704,13 @@ func rewriteValueARM64_OpARM64MVN(v *Value) bool {
if x.Op != OpARM64SRAconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64MVNshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
@@ -14721,13 +14721,13 @@ func rewriteValueARM64_OpARM64MVNshiftLL(v *Value) bool {
// match: (MVNshiftLL (MOVDconst [c]) [d])
// result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = ^int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
return true
}
return false
@@ -14737,13 +14737,13 @@ func rewriteValueARM64_OpARM64MVNshiftRA(v *Value) bool {
// match: (MVNshiftRA (MOVDconst [c]) [d])
// result: (MOVDconst [^(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = ^(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
return true
}
return false
@@ -14753,13 +14753,13 @@ func rewriteValueARM64_OpARM64MVNshiftRL(v *Value) bool {
// match: (MVNshiftRL (MOVDconst [c]) [d])
// result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
return true
}
return false
@@ -14809,13 +14809,13 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool {
if x.Op != OpARM64SLLconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64NEGshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
@@ -14827,13 +14827,13 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool {
if x.Op != OpARM64SRLconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64NEGshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
@@ -14845,13 +14845,13 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool {
if x.Op != OpARM64SRAconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64NEGshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
@@ -14862,13 +14862,13 @@ func rewriteValueARM64_OpARM64NEGshiftLL(v *Value) bool {
// match: (NEGshiftLL (MOVDconst [c]) [d])
// result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = -int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) << uint64(d)))
return true
}
return false
@@ -14878,13 +14878,13 @@ func rewriteValueARM64_OpARM64NEGshiftRA(v *Value) bool {
// match: (NEGshiftRA (MOVDconst [c]) [d])
// result: (MOVDconst [-(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = -(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(-(c >> uint64(d)))
return true
}
return false
@@ -14894,13 +14894,13 @@ func rewriteValueARM64_OpARM64NEGshiftRL(v *Value) bool {
// match: (NEGshiftRL (MOVDconst [c]) [d])
// result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = -int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) >> uint64(d)))
return true
}
return false
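The MVNshift* and NEGshift* hunks above only switch the constant-folding rules over to the typed aux helpers; the folded value is computed exactly as before. For reference, what one of these folds produces on a concrete, made-up constant and shift:

package main

import "fmt"

func main() {
	c := int64(0xf0)
	d := uint64(8) // example shift amount

	// MVNshiftLL (MOVDconst [c]) [d] folds to a single constant:
	mvn := ^int64(uint64(c) << d)
	fmt.Printf("%#x\n", uint64(mvn)) // 0xffffffffffff0fff

	// NEGshiftLL (MOVDconst [c]) [d] likewise:
	neg := -int64(uint64(c) << d)
	fmt.Println(neg) // -61440
}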
@@ -14987,13 +14987,13 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ORshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -15009,13 +15009,13 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ORshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -15031,13 +15031,13 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ORshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -15058,14 +15058,14 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
@@ -15081,15 +15081,15 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -15098,11 +15098,11 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
@@ -15128,14 +15128,14 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
@@ -15151,15 +15151,15 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -15168,11 +15168,11 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
@@ -15196,14 +15196,14 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
@@ -15220,15 +15220,15 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -15237,11 +15237,11 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
@@ -15271,14 +15271,14 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
@@ -15294,15 +15294,15 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -15311,11 +15311,11 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
@@ -15325,50 +15325,50 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
break
}
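The four OR patterns above recognize the shift/conditional-shift expansion of a variable rotate and collapse it to a single ROR or RORW. The 64-bit identity they depend on, stated in plain Go with math/bits used only to name the expected result (the inputs are arbitrary):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint64(0x0123456789abcdef)
	for _, y := range []uint64{0, 1, 13, 63, 200} { // arbitrary shift inputs
		s := y & 63 // the ANDconst [63] in the pattern

		rot := x << s
		if 64-s < 64 { // the CSEL0/CMPconst [64] guard: drop this term when s == 0
			rot |= x >> (64 - s)
		}

		if rot != bits.RotateLeft64(x, int(s)) {
			fmt.Println("mismatch at", y)
		}
	}
	fmt.Println("ok")
}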
// match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
- // cond: ac == ^((1<<uint(getARM64BFwidth(bfc))-1) << uint(getARM64BFlsb(bfc)))
+ // cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
// result: (BFI [bfc] y x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpARM64UBFIZ {
continue
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64ANDconst {
continue
}
- ac := v_1.AuxInt
+ ac := auxIntToInt64(v_1.AuxInt)
y := v_1.Args[0]
- if !(ac == ^((1<<uint(getARM64BFwidth(bfc)) - 1) << uint(getARM64BFlsb(bfc)))) {
+ if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
continue
}
v.reset(OpARM64BFI)
- v.AuxInt = bfc
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
break
}
// match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
- // cond: ac == ^(1<<uint(getARM64BFwidth(bfc))-1)
+ // cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
// result: (BFXIL [bfc] y x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpARM64UBFX {
continue
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64ANDconst {
continue
}
- ac := v_1.AuxInt
+ ac := auxIntToInt64(v_1.AuxInt)
y := v_1.Args[0]
- if !(ac == ^(1<<uint(getARM64BFwidth(bfc)) - 1)) {
+ if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
continue
}
v.reset(OpARM64BFXIL)
- v.AuxInt = bfc
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
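The BFI and BFXIL rules above fire only when the AND mask is exactly the complement of the bitfield produced by the UBFIZ/UBFX term, so the OR is a lossless field insert. A small plain-Go check of that disjointness for one arbitrary lsb/width pair (the values are made up; this is not the compiler's encoding of the arm64BitField aux):

package main

import "fmt"

func main() {
	x := uint64(0x00000000deadbeef)
	y := uint64(0x1122334455667788)
	lsb, width := uint(8), uint(16) // example bitfield parameters

	mask := uint64(1)<<width - 1

	ubfiz := (x & mask) << lsb // the (UBFIZ [bfc] x) operand
	ac := ^(mask << lsb)       // the ANDconst value the condition requires
	cleared := y & ac          // the (ANDconst [ac] y) operand

	ins := cleared | ubfiz // what the replacement (BFI [bfc] y x) must compute

	fmt.Println(ins>>lsb&mask == x&mask)  // the field now holds x's low bits
	fmt.Println(ins&^(mask<<lsb) == y&ac) // every other bit is still y's
}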
@@ -15376,22 +15376,22 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
@@ -15402,8 +15402,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x0.Op != OpARM64MOVBUload {
continue
}
- i3 := x0.AuxInt
- s := x0.Aux
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o1.Args[1]
@@ -15414,8 +15414,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
continue
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
@@ -15430,8 +15430,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x2.Op != OpARM64MOVBUload {
continue
}
- i1 := x2.AuxInt
- if x2.Aux != s {
+ i1 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -15446,8 +15446,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x3.Op != OpARM64MOVBUload {
continue
}
- i0 := x3.AuxInt
- if x3.Aux != s {
+ i0 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
@@ -15457,9 +15457,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
b = mergePoint(b, x0, x1, x2, x3)
v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
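The rule above merges four one-byte loads at i0..i0+3, combined with 8/16/24-bit shifts, into one 32-bit unsigned load at i0 (the OffPtr now carries int64(i0)); the later variants in this function extend the same idea to eight bytes and, with a REVW/REV inserted, to the byte-reversed orderings. The little-endian identity, sketched on an invented buffer:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	mem := []byte{0xaa, 0x11, 0x22, 0x33, 0x44, 0xbb}
	i0 := 1 // example base offset

	// (MOVBUload [i0+3]) << 24 | (MOVBUload [i0+2]) << 16 |
	// (MOVBUload [i0+1]) << 8  |  (MOVBUload [i0])
	pieced := uint32(mem[i0+3])<<24 | uint32(mem[i0+2])<<16 |
		uint32(mem[i0+1])<<8 | uint32(mem[i0])

	// ...equals one little-endian 32-bit load at i0 (MOVWUload).
	whole := binary.LittleEndian.Uint32(mem[i0 : i0+4])
	fmt.Println(pieced == whole) // true

	// With the byte order of the loads swapped, the same OR tree yields the
	// byte-reversed value, which is why those patterns add a REVW on top.
	fmt.Println(bits.ReverseBytes32(whole) == binary.BigEndian.Uint32(mem[i0:i0+4])) // true
}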
@@ -15473,17 +15473,17 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
@@ -15491,10 +15491,10 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVBUload || x0.AuxInt != 3 {
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 3 {
continue
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o1.Args[1]
@@ -15502,7 +15502,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
@@ -15514,7 +15514,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 1 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -15561,17 +15561,17 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
@@ -15585,7 +15585,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 3 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 3 {
continue
}
idx := x0_1.Args[0]
@@ -15602,7 +15602,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
continue
}
y2 := o0.Args[1]
@@ -15618,7 +15618,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] {
continue
}
y3 := v_1
@@ -15643,42 +15643,42 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
@@ -15689,8 +15689,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x0.Op != OpARM64MOVBUload {
continue
}
- i7 := x0.AuxInt
- s := x0.Aux
+ i7 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o5.Args[1]
@@ -15701,8 +15701,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
continue
}
- i6 := x1.AuxInt
- if x1.Aux != s {
+ i6 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
@@ -15717,8 +15717,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x2.Op != OpARM64MOVBUload {
continue
}
- i5 := x2.AuxInt
- if x2.Aux != s {
+ i5 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -15733,8 +15733,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x3.Op != OpARM64MOVBUload {
continue
}
- i4 := x3.AuxInt
- if x3.Aux != s {
+ i4 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
@@ -15749,8 +15749,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x4.Op != OpARM64MOVBUload {
continue
}
- i3 := x4.AuxInt
- if x4.Aux != s {
+ i3 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
@@ -15765,8 +15765,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x5.Op != OpARM64MOVBUload {
continue
}
- i2 := x5.AuxInt
- if x5.Aux != s {
+ i2 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
@@ -15781,8 +15781,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x6.Op != OpARM64MOVBUload {
continue
}
- i1 := x6.AuxInt
- if x6.Aux != s {
+ i1 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
@@ -15797,8 +15797,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x7.Op != OpARM64MOVBUload {
continue
}
- i0 := x7.AuxInt
- if x7.Aux != s {
+ i0 := auxIntToInt32(x7.AuxInt)
+ if auxToSym(x7.Aux) != s {
continue
}
_ = x7.Args[1]
@@ -15808,9 +15808,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
@@ -15824,37 +15824,37 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
@@ -15862,10 +15862,10 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVBUload || x0.AuxInt != 7 {
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 7 {
continue
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o5.Args[1]
@@ -15873,7 +15873,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 6 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 6 || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
@@ -15885,7 +15885,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -15897,7 +15897,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 4 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 4 || auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
@@ -15909,7 +15909,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x4 := y4.Args[0]
- if x4.Op != OpARM64MOVBUload || x4.AuxInt != 3 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 3 || auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
@@ -15921,7 +15921,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x5 := y5.Args[0]
- if x5.Op != OpARM64MOVBUload || x5.AuxInt != 2 || x5.Aux != s {
+ if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 2 || auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
@@ -15933,7 +15933,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x6 := y6.Args[0]
- if x6.Op != OpARM64MOVBUload || x6.AuxInt != 1 || x6.Aux != s {
+ if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 1 || auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
@@ -15980,37 +15980,37 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
@@ -16024,7 +16024,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 7 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 7 {
continue
}
idx := x0_1.Args[0]
@@ -16041,7 +16041,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] {
continue
}
y2 := o4.Args[1]
@@ -16057,7 +16057,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
continue
}
y3 := o3.Args[1]
@@ -16073,7 +16073,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] {
continue
}
y4 := o2.Args[1]
@@ -16089,7 +16089,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] {
continue
}
y5 := o1.Args[1]
@@ -16105,7 +16105,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x5_1 := x5.Args[1]
- if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+ if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] {
continue
}
y6 := o0.Args[1]
@@ -16121,7 +16121,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x6_1 := x6.Args[1]
- if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+ if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] {
continue
}
y7 := v_1
@@ -16146,22 +16146,22 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
@@ -16172,8 +16172,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x0.Op != OpARM64MOVBUload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o1.Args[1]
@@ -16184,8 +16184,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
@@ -16200,8 +16200,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x2.Op != OpARM64MOVBUload {
continue
}
- i2 := x2.AuxInt
- if x2.Aux != s {
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -16216,8 +16216,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x3.Op != OpARM64MOVBUload {
continue
}
- i3 := x3.AuxInt
- if x3.Aux != s {
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
@@ -16228,9 +16228,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
v0 := b.NewValue0(x3.Pos, OpARM64REVW, t)
v.copyOf(v0)
v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
- v1.Aux = s
+ v1.Aux = symToAux(s)
v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
- v2.AuxInt = i0
+ v2.AuxInt = int64ToAuxInt(int64(i0))
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
@@ -16245,17 +16245,17 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
@@ -16274,10 +16274,10 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
continue
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
@@ -16297,7 +16297,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -16310,7 +16310,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
@@ -16335,17 +16335,17 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
@@ -16372,7 +16372,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
continue
}
y2 := o0.Args[1]
@@ -16388,7 +16388,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
continue
}
y3 := v_1
@@ -16404,7 +16404,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
continue
}
b = mergePoint(b, x0, x1, x2, x3)
@@ -16419,42 +16419,42 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
}
// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
@@ -16465,8 +16465,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x0.Op != OpARM64MOVBUload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o5.Args[1]
@@ -16477,8 +16477,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
@@ -16493,8 +16493,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x2.Op != OpARM64MOVBUload {
continue
}
- i2 := x2.AuxInt
- if x2.Aux != s {
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -16509,8 +16509,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x3.Op != OpARM64MOVBUload {
continue
}
- i3 := x3.AuxInt
- if x3.Aux != s {
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
@@ -16525,8 +16525,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x4.Op != OpARM64MOVBUload {
continue
}
- i4 := x4.AuxInt
- if x4.Aux != s {
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
@@ -16541,8 +16541,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x5.Op != OpARM64MOVBUload {
continue
}
- i5 := x5.AuxInt
- if x5.Aux != s {
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
@@ -16557,8 +16557,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x6.Op != OpARM64MOVBUload {
continue
}
- i6 := x6.AuxInt
- if x6.Aux != s {
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
@@ -16573,8 +16573,8 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
if x7.Op != OpARM64MOVBUload {
continue
}
- i7 := x7.AuxInt
- if x7.Aux != s {
+ i7 := auxIntToInt32(x7.AuxInt)
+ if auxToSym(x7.Aux) != s {
continue
}
_ = x7.Args[1]
@@ -16585,9 +16585,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
v0 := b.NewValue0(x7.Pos, OpARM64REV, t)
v.copyOf(v0)
v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
- v1.Aux = s
+ v1.Aux = symToAux(s)
v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
- v2.AuxInt = i0
+ v2.AuxInt = int64ToAuxInt(int64(i0))
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
@@ -16602,37 +16602,37 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
@@ -16651,10 +16651,10 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
continue
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
@@ -16674,7 +16674,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -16687,7 +16687,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
@@ -16699,7 +16699,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x4 := y4.Args[0]
- if x4.Op != OpARM64MOVBUload || x4.AuxInt != 4 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 4 || auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
@@ -16711,7 +16711,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x5 := y5.Args[0]
- if x5.Op != OpARM64MOVBUload || x5.AuxInt != 5 || x5.Aux != s {
+ if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 5 || auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
@@ -16723,7 +16723,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x6 := y6.Args[0]
- if x6.Op != OpARM64MOVBUload || x6.AuxInt != 6 || x6.Aux != s {
+ if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 6 || auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
@@ -16735,7 +16735,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x7 := y7.Args[0]
- if x7.Op != OpARM64MOVBUload || x7.AuxInt != 7 || x7.Aux != s {
+ if x7.Op != OpARM64MOVBUload || auxIntToInt32(x7.AuxInt) != 7 || auxToSym(x7.Aux) != s {
continue
}
_ = x7.Args[1]
@@ -16760,37 +16760,37 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
@@ -16817,7 +16817,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
continue
}
y2 := o4.Args[1]
@@ -16833,7 +16833,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
continue
}
y3 := o3.Args[1]
@@ -16849,7 +16849,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] {
continue
}
y4 := o2.Args[1]
@@ -16865,7 +16865,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] {
continue
}
y5 := o1.Args[1]
@@ -16881,7 +16881,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x5_1 := x5.Args[1]
- if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+ if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] {
continue
}
y6 := o0.Args[1]
@@ -16897,7 +16897,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x6_1 := x6.Args[1]
- if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+ if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] {
continue
}
y7 := v_1
@@ -16913,7 +16913,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool {
continue
}
x7_1 := x7.Args[1]
- if x7_1.Op != OpARM64ADDconst || x7_1.AuxInt != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ if x7_1.Op != OpARM64ADDconst || auxIntToInt64(x7_1.AuxInt) != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
continue
}
b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
@@ -16964,13 +16964,13 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool {
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64ORNshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -16983,13 +16983,13 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool {
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64ORNshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -17002,13 +17002,13 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool {
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64ORNshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -17020,14 +17020,14 @@ func rewriteValueARM64_OpARM64ORNshiftLL(v *Value) bool {
// match: (ORNshiftLL x (MOVDconst [c]) [d])
// result: (ORconst x [^int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = ^int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -17035,17 +17035,17 @@ func rewriteValueARM64_OpARM64ORNshiftLL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
@@ -17056,14 +17056,14 @@ func rewriteValueARM64_OpARM64ORNshiftRA(v *Value) bool {
// match: (ORNshiftRA x (MOVDconst [c]) [d])
// result: (ORconst x [^(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = ^(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
v.AddArg(x)
return true
}
@@ -17071,17 +17071,17 @@ func rewriteValueARM64_OpARM64ORNshiftRA(v *Value) bool {
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
@@ -17092,14 +17092,14 @@ func rewriteValueARM64_OpARM64ORNshiftRL(v *Value) bool {
// match: (ORNshiftRL x (MOVDconst [c]) [d])
// result: (ORconst x [^int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -17107,17 +17107,17 @@ func rewriteValueARM64_OpARM64ORNshiftRL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
@@ -17174,17 +17174,17 @@ func rewriteValueARM64_OpARM64ORconst(v *Value) bool {
// cond: c2|c1 == ^0
// result: (ORconst [c1] x)
for {
- c1 := v.AuxInt
+ c1 := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- c2 := v_0.AuxInt
+ c2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c2|c1 == ^0) {
break
}
v.reset(OpARM64ORconst)
- v.AuxInt = c1
+ v.AuxInt = int64ToAuxInt(c1)
v.AddArg(x)
return true
}
@@ -17198,16 +17198,16 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// match: (ORshiftLL (MOVDconst [c]) x [d])
// result: (ORconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -17215,14 +17215,14 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// match: (ORshiftLL x (MOVDconst [c]) [d])
// result: (ORconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -17230,13 +17230,13 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SLLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
@@ -17246,8 +17246,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// match: ( ORshiftLL [c] (SRLconst x [64-c]) x)
// result: (RORconst [64-c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
@@ -17255,7 +17255,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg(x)
return true
}
@@ -17264,24 +17264,24 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: (RORWconst [32-c] x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg(x)
return true
}
// match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// result: (REV16W x)
for {
- if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
@@ -17295,14 +17295,14 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// match: ( ORshiftLL [c] (SRLconst x [64-c]) x2)
// result: (EXTRconst [64-c] x2 x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
x2 := v_1
v.reset(OpARM64EXTRconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg2(x2, x)
return true
}
@@ -17311,49 +17311,49 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
x2 := v_1
if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg2(x2, x)
return true
}
// match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
- // cond: sc == getARM64BFwidth(bfc)
+ // cond: sc == bfc.getARM64BFwidth()
// result: (BFXIL [bfc] y x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != sc {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc {
break
}
y := v_1.Args[0]
- if !(sc == getARM64BFwidth(bfc)) {
+ if !(sc == bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64BFXIL)
- v.AuxInt = bfc
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
// match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
- // result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
@@ -17364,8 +17364,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x0.Op != OpARM64MOVBUload {
break
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := v_1
@@ -17376,8 +17376,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
break
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
@@ -17387,9 +17387,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
@@ -17399,7 +17399,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
@@ -17418,10 +17418,10 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
@@ -17449,7 +17449,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
@@ -17476,7 +17476,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
break
}
b = mergePoint(b, x0, x1)
@@ -17487,14 +17487,14 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
}
// match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y1, y2, o0)
- // result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
@@ -17502,8 +17502,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x0.Op != OpARM64MOVHUload {
break
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o0.Args[1]
@@ -17514,8 +17514,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
break
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
@@ -17530,8 +17530,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x2.Op != OpARM64MOVBUload {
break
}
- i3 := x2.AuxInt
- if x2.Aux != s {
+ i3 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
@@ -17541,9 +17541,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
@@ -17553,11 +17553,11 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
@@ -17573,10 +17573,10 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
@@ -17596,7 +17596,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -17617,11 +17617,11 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
@@ -17645,7 +17645,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
break
}
y2 := v_1
@@ -17661,7 +17661,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 3 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
break
}
b = mergePoint(b, x0, x1, x2)
@@ -17675,11 +17675,11 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
@@ -17695,13 +17695,13 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
- if p1.Op != OpARM64ADDshiftLL || p1.AuxInt != 1 {
+ if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 1 {
break
}
idx1 := p1.Args[1]
@@ -17714,7 +17714,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
@@ -17726,31 +17726,31 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
v.copyOf(v0)
v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type)
- v1.AuxInt = 1
+ v1.AuxInt = int64ToAuxInt(1)
v1.AddArg(idx0)
v0.AddArg3(ptr0, v1, mem)
return true
}
// match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
// cond: i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
- // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
@@ -17758,8 +17758,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x0.Op != OpARM64MOVWUload {
break
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o2.Args[1]
@@ -17770,8 +17770,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
break
}
- i4 := x1.AuxInt
- if x1.Aux != s {
+ i4 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
@@ -17786,8 +17786,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x2.Op != OpARM64MOVBUload {
break
}
- i5 := x2.AuxInt
- if x2.Aux != s {
+ i5 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
@@ -17802,8 +17802,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x3.Op != OpARM64MOVBUload {
break
}
- i6 := x3.AuxInt
- if x3.Aux != s {
+ i6 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[1]
@@ -17818,8 +17818,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x4.Op != OpARM64MOVBUload {
break
}
- i7 := x4.AuxInt
- if x4.Aux != s {
+ i7 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[1]
@@ -17829,9 +17829,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
b = mergePoint(b, x0, x1, x2, x3, x4)
v0 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
@@ -17841,21 +17841,21 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
@@ -17871,10 +17871,10 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 4 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
@@ -17894,7 +17894,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
@@ -17907,7 +17907,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
continue
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
@@ -17919,7 +17919,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
continue
}
x4 := y4.Args[0]
- if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
@@ -17939,21 +17939,21 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
@@ -17969,13 +17969,13 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 4 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
- if p1.Op != OpARM64ADDshiftLL || p1.AuxInt != 2 {
+ if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 2 {
break
}
idx1 := p1.Args[1]
@@ -17988,7 +17988,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
@@ -18001,7 +18001,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[1]
@@ -18013,7 +18013,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x4 := y4.Args[0]
- if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[1]
@@ -18024,7 +18024,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t)
v.copyOf(v0)
v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type)
- v1.AuxInt = 2
+ v1.AuxInt = int64ToAuxInt(2)
v1.AddArg(idx0)
v0.AddArg3(ptr0, v1, mem)
return true
@@ -18034,21 +18034,21 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
@@ -18072,7 +18072,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 4 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 4 || idx != x1_1.Args[0] || mem != x1.Args[2] {
break
}
y2 := o1.Args[1]
@@ -18088,7 +18088,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
break
}
y3 := o0.Args[1]
@@ -18104,7 +18104,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] {
break
}
y4 := v_1
@@ -18120,7 +18120,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 7 || idx != x4_1.Args[0] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 7 || idx != x4_1.Args[0] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
break
}
b = mergePoint(b, x0, x1, x2, x3, x4)
@@ -18134,7 +18134,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
@@ -18145,8 +18145,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x0.Op != OpARM64MOVBUload {
break
}
- i1 := x0.AuxInt
- s := x0.Aux
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := v_1
@@ -18157,8 +18157,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
break
}
- i0 := x1.AuxInt
- if x1.Aux != s {
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
@@ -18169,8 +18169,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
v0 := b.NewValue0(x1.Pos, OpARM64REV16W, t)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
@@ -18180,7 +18180,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
@@ -18188,10 +18188,10 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVBUload || x0.AuxInt != 1 {
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 1 {
break
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p1 := x0.Args[0]
if p1.Op != OpARM64ADD {
@@ -18232,7 +18232,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
@@ -18246,7 +18246,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 1 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 {
break
}
idx := x0_1.Args[0]
@@ -18272,14 +18272,14 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
}
// match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)
- // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
@@ -18291,8 +18291,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x0.Op != OpARM64MOVHUload {
break
}
- i2 := x0.AuxInt
- s := x0.Aux
+ i2 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o0.Args[1]
@@ -18303,8 +18303,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
break
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
@@ -18319,8 +18319,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x2.Op != OpARM64MOVBUload {
break
}
- i0 := x2.AuxInt
- if x2.Aux != s {
+ i0 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
@@ -18331,9 +18331,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
v0 := b.NewValue0(x2.Pos, OpARM64REVW, t)
v.copyOf(v0)
v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
- v1.Aux = s
+ v1.Aux = symToAux(s)
v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
- v2.AuxInt = i0
+ v2.AuxInt = int64ToAuxInt(int64(i0))
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
@@ -18344,11 +18344,11 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
@@ -18357,10 +18357,10 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVHUload || x0.AuxInt != 2 {
+ if x0.Op != OpARM64MOVHUload || auxIntToInt32(x0.AuxInt) != 2 {
break
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o0.Args[1]
@@ -18368,7 +18368,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
@@ -18414,11 +18414,11 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
@@ -18433,7 +18433,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 2 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 {
break
}
idx := x0_1.Args[0]
@@ -18450,7 +18450,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
break
}
y2 := v_1
@@ -18475,24 +18475,24 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
}
// match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
- // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
@@ -18504,8 +18504,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x0.Op != OpARM64MOVWUload {
break
}
- i4 := x0.AuxInt
- s := x0.Aux
+ i4 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o2.Args[1]
@@ -18516,8 +18516,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x1.Op != OpARM64MOVBUload {
break
}
- i3 := x1.AuxInt
- if x1.Aux != s {
+ i3 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
@@ -18532,8 +18532,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x2.Op != OpARM64MOVBUload {
break
}
- i2 := x2.AuxInt
- if x2.Aux != s {
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
@@ -18548,8 +18548,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x3.Op != OpARM64MOVBUload {
break
}
- i1 := x3.AuxInt
- if x3.Aux != s {
+ i1 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[1]
@@ -18564,8 +18564,8 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
if x4.Op != OpARM64MOVBUload {
break
}
- i0 := x4.AuxInt
- if x4.Aux != s {
+ i0 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[1]
@@ -18576,9 +18576,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
v0 := b.NewValue0(x4.Pos, OpARM64REV, t)
v.copyOf(v0)
v1 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
- v1.Aux = s
+ v1.Aux = symToAux(s)
v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
- v2.AuxInt = i0
+ v2.AuxInt = int64ToAuxInt(int64(i0))
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
@@ -18589,21 +18589,21 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
@@ -18612,10 +18612,10 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVWUload || x0.AuxInt != 4 {
+ if x0.Op != OpARM64MOVWUload || auxIntToInt32(x0.AuxInt) != 4 {
break
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o2.Args[1]
@@ -18623,7 +18623,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 3 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 3 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
@@ -18635,7 +18635,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
@@ -18647,7 +18647,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 1 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 1 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[1]
@@ -18693,21 +18693,21 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
@@ -18722,7 +18722,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 4 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 4 {
break
}
idx := x0_1.Args[0]
@@ -18739,7 +18739,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 3 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 3 || idx != x1_1.Args[0] || mem != x1.Args[2] {
break
}
y2 := o1.Args[1]
@@ -18755,7 +18755,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
break
}
y3 := o0.Args[1]
@@ -18771,7 +18771,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] {
break
}
y4 := v_1
@@ -18803,16 +18803,16 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
// match: (ORshiftRA (MOVDconst [c]) x [d])
// result: (ORconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -18820,14 +18820,14 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
// match: (ORshiftRA x (MOVDconst [c]) [d])
// result: (ORconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
@@ -18835,13 +18835,13 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SRAconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
@@ -18857,16 +18857,16 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
// match: (ORshiftRL (MOVDconst [c]) x [d])
// result: (ORconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -18874,14 +18874,14 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
// match: (ORshiftRL x (MOVDconst [c]) [d])
// result: (ORconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -18889,13 +18889,13 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SRLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
@@ -18905,8 +18905,8 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
// match: ( ORshiftRL [c] (SLLconst x [64-c]) x)
// result: (RORconst [ c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
@@ -18914,7 +18914,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
@@ -18923,8 +18923,8 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
// result: (RORWconst [c] x)
for {
t := v.Type
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
break
}
x := v_0.Args[0]
@@ -18932,7 +18932,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
@@ -18940,22 +18940,22 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
// cond: lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
// result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- ac := v_0.AuxInt
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64SLLconst {
break
}
- lc := v_1.AuxInt
+ lc := auxIntToInt64(v_1.AuxInt)
y := v_1.Args[0]
if !(lc > rc && ac == ^((1<<uint(64-lc)-1)<<uint64(lc-rc))) {
break
}
v.reset(OpARM64BFI)
- v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
v.AddArg2(x, y)
return true
}
@@ -18963,22 +18963,22 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
// cond: lc < rc && ac == ^((1<<uint(64-rc)-1))
// result: (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- ac := v_0.AuxInt
+ ac := auxIntToInt64(v_0.AuxInt)
y := v_0.Args[0]
if v_1.Op != OpARM64SLLconst {
break
}
- lc := v_1.AuxInt
+ lc := auxIntToInt64(v_1.AuxInt)
x := v_1.Args[0]
if !(lc < rc && ac == ^(1<<uint(64-rc)-1)) {
break
}
v.reset(OpARM64BFXIL)
- v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
v.AddArg2(y, x)
return true
}
@@ -18989,14 +18989,14 @@ func rewriteValueARM64_OpARM64RORWconst(v *Value) bool {
// match: (RORWconst [c] (RORWconst [d] x))
// result: (RORWconst [(c+d)&31] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64RORWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpARM64RORWconst)
- v.AuxInt = (c + d) & 31
+ v.AuxInt = int64ToAuxInt((c + d) & 31)
v.AddArg(x)
return true
}
@@ -19007,14 +19007,14 @@ func rewriteValueARM64_OpARM64RORconst(v *Value) bool {
// match: (RORconst [c] (RORconst [d] x))
// result: (RORconst [(c+d)&63] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64RORconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpARM64RORconst)
- v.AuxInt = (c + d) & 63
+ v.AuxInt = int64ToAuxInt((c + d) & 63)
v.AddArg(x)
return true
}
@@ -19109,8 +19109,8 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
// cond: 0 < c && c < 64
// result: (ANDconst [^(1<<uint(c)-1)] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
x := v_0.Args[0]
@@ -19118,7 +19118,7 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = ^(1<<uint(c) - 1)
+ v.AuxInt = int64ToAuxInt(^(1<<uint(c) - 1))
v.AddArg(x)
return true
}
@@ -19126,17 +19126,17 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
// cond: isARM64BFMask(sc, ac, 0)
// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- ac := v_0.AuxInt
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, 0)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
v.AddArg(x)
return true
}
@@ -19144,7 +19144,7 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
// cond: isARM64BFMask(sc, 1<<32-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 32)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
@@ -19153,7 +19153,7 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, 32)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
v.AddArg(x)
return true
}
@@ -19161,7 +19161,7 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
// cond: isARM64BFMask(sc, 1<<16-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 16)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
@@ -19170,7 +19170,7 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, 16)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
v.AddArg(x)
return true
}
@@ -19178,7 +19178,7 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
// cond: isARM64BFMask(sc, 1<<8-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 8)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
@@ -19187,25 +19187,25 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, 8)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (UBFIZ [bfc] x))
- // cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) {
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
@@ -19247,17 +19247,17 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
// cond: lc > rc
// result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc > rc) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
v.AddArg(x)
return true
}
@@ -19265,17 +19265,17 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
// cond: lc <= rc
// result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc <= rc) {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
v.AddArg(x)
return true
}
@@ -19283,7 +19283,7 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
// cond: rc < 32
// result: (SBFX [armBFAuxInt(rc, 32-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWreg {
break
}
@@ -19292,7 +19292,7 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(rc, 32-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
v.AddArg(x)
return true
}
@@ -19300,7 +19300,7 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
// cond: rc < 16
// result: (SBFX [armBFAuxInt(rc, 16-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHreg {
break
}
@@ -19309,7 +19309,7 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(rc, 16-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
v.AddArg(x)
return true
}
@@ -19317,7 +19317,7 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
// cond: rc < 8
// result: (SBFX [armBFAuxInt(rc, 8-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBreg {
break
}
@@ -19326,43 +19326,43 @@ func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(rc, 8-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
v.AddArg(x)
return true
}
// match: (SRAconst [sc] (SBFIZ [bfc] x))
- // cond: sc < getARM64BFlsb(bfc)
- // result: (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFlsb(bfc)) {
+ if !(sc < bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// match: (SRAconst [sc] (SBFIZ [bfc] x))
- // cond: sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+ if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
@@ -19404,8 +19404,8 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
// cond: 0 < c && c < 64
// result: (ANDconst [1<<uint(64-c)-1] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
x := v_0.Args[0]
@@ -19413,7 +19413,7 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = 1<<uint(64-c) - 1
+ v.AuxInt = int64ToAuxInt(1<<uint(64-c) - 1)
v.AddArg(x)
return true
}
@@ -19421,17 +19421,17 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
// cond: lc > rc
// result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc > rc) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
v.AddArg(x)
return true
}
@@ -19439,17 +19439,17 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
// cond: isARM64BFMask(sc, ac, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- ac := v_0.AuxInt
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, sc)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
v.AddArg(x)
return true
}
@@ -19457,7 +19457,7 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
// cond: isARM64BFMask(sc, 1<<32-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
@@ -19466,7 +19466,7 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
v.AddArg(x)
return true
}
@@ -19474,7 +19474,7 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
// cond: isARM64BFMask(sc, 1<<16-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
@@ -19483,7 +19483,7 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
v.AddArg(x)
return true
}
@@ -19491,7 +19491,7 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
// cond: isARM64BFMask(sc, 1<<8-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
@@ -19500,7 +19500,7 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
v.AddArg(x)
return true
}
@@ -19508,89 +19508,89 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
// cond: lc < rc
// result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc < rc) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFX [bfc] x))
- // cond: sc < getARM64BFwidth(bfc)
- // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFwidth(bfc)) {
+ if !(sc < bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc == getARM64BFlsb(bfc)
- // result: (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc == getARM64BFlsb(bfc)) {
+ if !(sc == bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = 1<<uint(getARM64BFwidth(bfc)) - 1
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc < getARM64BFlsb(bfc)
- // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFlsb(bfc)) {
+ if !(sc < bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
@@ -19807,13 +19807,13 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool {
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64SUBshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -19826,13 +19826,13 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool {
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64SUBshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -19845,13 +19845,13 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool {
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64SUBshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -19917,14 +19917,14 @@ func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
// match: (SUBshiftLL x (MOVDconst [c]) [d])
// result: (SUBconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64SUBconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -19932,17 +19932,17 @@ func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
@@ -19953,14 +19953,14 @@ func rewriteValueARM64_OpARM64SUBshiftRA(v *Value) bool {
// match: (SUBshiftRA x (MOVDconst [c]) [d])
// result: (SUBconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64SUBconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
@@ -19968,17 +19968,17 @@ func rewriteValueARM64_OpARM64SUBshiftRA(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
@@ -19989,14 +19989,14 @@ func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool {
// match: (SUBshiftRL x (MOVDconst [c]) [d])
// result: (SUBconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64SUBconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -20004,17 +20004,17 @@ func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
@@ -20048,13 +20048,13 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool {
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64TSTshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -20070,13 +20070,13 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool {
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64TSTshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -20092,13 +20092,13 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool {
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64TSTshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -20166,16 +20166,16 @@ func rewriteValueARM64_OpARM64TSTshiftLL(v *Value) bool {
// match: (TSTshiftLL (MOVDconst [c]) x [d])
// result: (TSTconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64TSTconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -20183,14 +20183,14 @@ func rewriteValueARM64_OpARM64TSTshiftLL(v *Value) bool {
// match: (TSTshiftLL x (MOVDconst [c]) [d])
// result: (TSTconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64TSTconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -20203,16 +20203,16 @@ func rewriteValueARM64_OpARM64TSTshiftRA(v *Value) bool {
// match: (TSTshiftRA (MOVDconst [c]) x [d])
// result: (TSTconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64TSTconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -20220,14 +20220,14 @@ func rewriteValueARM64_OpARM64TSTshiftRA(v *Value) bool {
// match: (TSTshiftRA x (MOVDconst [c]) [d])
// result: (TSTconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64TSTconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
@@ -20240,16 +20240,16 @@ func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool {
// match: (TSTshiftRL (MOVDconst [c]) x [d])
// result: (TSTconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64TSTconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -20257,14 +20257,14 @@ func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool {
// match: (TSTshiftRL x (MOVDconst [c]) [d])
// result: (TSTconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64TSTconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -20273,20 +20273,20 @@ func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool {
func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
v_0 := v.Args[0]
// match: (UBFIZ [bfc] (SLLconst [sc] x))
- // cond: sc < getARM64BFwidth(bfc)
- // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFwidth(bfc)) {
+ if !(sc < bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
@@ -20295,74 +20295,74 @@ func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
v_0 := v.Args[0]
// match: (UBFX [bfc] (SRLconst [sc] x))
- // cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) {
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc == getARM64BFlsb(bfc)
- // result: (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc == getARM64BFlsb(bfc)) {
+ if !(sc == bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = 1<<uint(getARM64BFwidth(bfc)) - 1
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc < getARM64BFlsb(bfc)
- // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFlsb(bfc)) {
+ if !(sc < bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
@@ -20657,13 +20657,13 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64XORshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -20679,13 +20679,13 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64XORshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -20701,13 +20701,13 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64XORshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
@@ -20728,14 +20728,14 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
@@ -20751,15 +20751,15 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -20768,11 +20768,11 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
@@ -20798,14 +20798,14 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
@@ -20821,15 +20821,15 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -20838,11 +20838,11 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
@@ -20866,14 +20866,14 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
@@ -20890,15 +20890,15 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -20907,11 +20907,11 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
@@ -20941,14 +20941,14 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
@@ -20964,15 +20964,15 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
@@ -20981,11 +20981,11 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool {
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
@@ -21055,16 +21055,16 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
// match: (XORshiftLL (MOVDconst [c]) x [d])
// result: (XORconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64XORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -21072,14 +21072,14 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
// match: (XORshiftLL x (MOVDconst [c]) [d])
// result: (XORconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
@@ -21087,24 +21087,24 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (XORshiftLL [c] (SRLconst x [64-c]) x)
// result: (RORconst [64-c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
@@ -21112,7 +21112,7 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg(x)
return true
}
@@ -21121,24 +21121,24 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
// result: (RORWconst [32-c] x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg(x)
return true
}
// match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// result: (REV16W x)
for {
- if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
@@ -21152,14 +21152,14 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
// match: (XORshiftLL [c] (SRLconst x [64-c]) x2)
// result: (EXTRconst [64-c] x2 x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
x2 := v_1
v.reset(OpARM64EXTRconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg2(x2, x)
return true
}
@@ -21168,18 +21168,18 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
x2 := v_1
if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg2(x2, x)
return true
}
@@ -21192,16 +21192,16 @@ func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
// match: (XORshiftRA (MOVDconst [c]) x [d])
// result: (XORconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64XORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -21209,14 +21209,14 @@ func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
// match: (XORshiftRA x (MOVDconst [c]) [d])
// result: (XORconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
@@ -21224,17 +21224,17 @@ func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
@@ -21246,16 +21246,16 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
// match: (XORshiftRL (MOVDconst [c]) x [d])
// result: (XORconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64XORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -21263,14 +21263,14 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
// match: (XORshiftRL x (MOVDconst [c]) [d])
// result: (XORconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
@@ -21278,24 +21278,24 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (XORshiftRL [c] (SLLconst x [64-c]) x)
// result: (RORconst [ c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
@@ -21303,7 +21303,7 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
@@ -21312,8 +21312,8 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
// result: (RORWconst [c] x)
for {
t := v.Type
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
break
}
x := v_0.Args[0]
@@ -21321,7 +21321,7 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}