about summary refs log tree commit diff
diff options
context:
space:
mode:
authorKeith Randall <khr@golang.org>2023-04-01 10:33:26 -0700
committerGopher Robot <gobot@golang.org>2023-04-24 13:43:33 +0000
commitca9947db78f4dd37052e163850ed23e8c47d7e21 (patch)
tree5a522a6f37be6cd6d88fabe092db5d890e3bec8c
parent130a7f8e8d5d3ecb7f17a741cc18e96f134fc38a (diff)
downloadgo-ca9947db78f4dd37052e163850ed23e8c47d7e21.tar.gz
go-ca9947db78f4dd37052e163850ed23e8c47d7e21.zip
[release-branch.go1.19] cmd/compile: use correct type for byteswaps on multi-byte stores
Use the type of the store for the byteswap, not the type of the store's value argument.

Normally when we're storing a 16-bit value, the value being stored is also typed as 16 bits. But sometimes it is typed as something smaller, usually because it is the result of an upcast from a smaller value, and that upcast needs no instructions.

If the type of the store's arg is thinner than the type being stored, and the byteswap'd value uses that thinner type, and the byteswap'd value needs to be spilled & restored, that spill/restore happens using the thinner type, which causes us to lose some of the top bits of the value.

Fixes #59373

Change-Id: If6ce1e8a76f18bf8e9d79871b6caa438bc3cce4d
Reviewed-on: https://go-review.googlesource.com/c/go/+/481395
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-on: https://go-review.googlesource.com/c/go/+/483177
Auto-Submit: Dmitri Shuralyov <dmitshur@google.com>
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules  12
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules  36
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go  24
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go  74
-rw-r--r--  test/fixedbugs/issue59367.go                  80
5 files changed, 154 insertions, 72 deletions
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index c0a376e352..1c54b8cb75 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -1855,13 +1855,13 @@
x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
&& x0.Uses == 1
&& clobber(x0)
- => (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
+ => (MOVWstore [i-1] {s} p (ROLWconst <typ.UInt16> [8] w) mem)
(MOVBstore [i] {s} p1 w
x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
&& x0.Uses == 1
&& sequentialAddresses(p0, p1, 1)
&& clobber(x0)
- => (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
+ => (MOVWstore [i] {s} p0 (ROLWconst <typ.UInt16> [8] w) mem)
// Combine stores + shifts into bswap and larger (unaligned) stores
(MOVBstore [i] {s} p w
@@ -1872,7 +1872,7 @@
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- => (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
+ => (MOVLstore [i-3] {s} p (BSWAPL <typ.UInt32> w) mem)
(MOVBstore [i] {s} p3 w
x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w)
x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w)
@@ -1884,7 +1884,7 @@
&& sequentialAddresses(p1, p2, 1)
&& sequentialAddresses(p2, p3, 1)
&& clobber(x0, x1, x2)
- => (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
+ => (MOVLstore [i] {s} p0 (BSWAPL <typ.UInt32> w) mem)
(MOVBstore [i] {s} p w
x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)
@@ -1902,7 +1902,7 @@
&& x5.Uses == 1
&& x6.Uses == 1
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- => (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
+ => (MOVQstore [i-7] {s} p (BSWAPQ <typ.UInt64> w) mem)
(MOVBstore [i] {s} p7 w
x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w)
x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w)
@@ -1926,7 +1926,7 @@
&& sequentialAddresses(p5, p6, 1)
&& sequentialAddresses(p6, p7, 1)
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- => (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
+ => (MOVQstore [i] {s} p0 (BSWAPQ <typ.UInt64> w) mem)
// Combine constant stores into larger (unaligned) stores.
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index b5df4cf47b..daac115697 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -2747,7 +2747,7 @@
&& x5.Uses == 1
&& x6.Uses == 1
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- => (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+ => (MOVDstore [i-7] {s} ptr (REV <typ.UInt64> w) mem)
(MOVBstore [7] {s} p w
x0:(MOVBstore [6] {s} p (SRLconst [8] w)
x1:(MOVBstore [5] {s} p (SRLconst [16] w)
@@ -2767,7 +2767,7 @@
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- => (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+ => (MOVDstoreidx ptr0 idx0 (REV <typ.UInt64> w) mem)
(MOVBstore [i] {s} ptr w
x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w)
x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w)
@@ -2776,7 +2776,7 @@
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ => (MOVWstore [i-3] {s} ptr (REVW <typ.UInt32> w) mem)
(MOVBstore [3] {s} p w
x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w)
x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w)
@@ -2788,7 +2788,7 @@
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2)
- => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ => (MOVWstoreidx ptr0 idx0 (REVW <typ.UInt32> w) mem)
(MOVBstoreidx ptr (ADDconst [3] idx) w
x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w)
x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w)
@@ -2797,7 +2797,7 @@
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- => (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+ => (MOVWstoreidx ptr idx (REVW <typ.UInt32> w) mem)
(MOVBstoreidx ptr idx w
x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w)
x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w)
@@ -2815,7 +2815,7 @@
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ => (MOVWstore [i-3] {s} ptr (REVW <typ.UInt32> w) mem)
(MOVBstore [3] {s} p w
x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w))
x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w))
@@ -2827,7 +2827,7 @@
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2)
- => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ => (MOVWstoreidx ptr0 idx0 (REVW <typ.UInt32> w) mem)
(MOVBstore [i] {s} ptr w
x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
@@ -2836,7 +2836,7 @@
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ => (MOVWstore [i-3] {s} ptr (REVW <typ.UInt32> w) mem)
(MOVBstore [3] {s} p w
x0:(MOVBstore [2] {s} p (SRLconst [8] w)
x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w)
@@ -2848,31 +2848,31 @@
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2)
- => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ => (MOVWstoreidx ptr0 idx0 (REVW <typ.UInt32> w) mem)
(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
&& x.Uses == 1
&& clobber(x)
- => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ => (MOVHstore [i-1] {s} ptr (REV16W <typ.UInt16> w) mem)
(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <typ.UInt16> w) mem)
(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
- => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ => (MOVHstore [i-1] {s} ptr (REV16W <typ.UInt16> w) mem)
(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <typ.UInt16> w) mem)
(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
- => (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr idx (REV16W <typ.UInt16> w) mem)
(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
@@ -2880,23 +2880,23 @@
(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
&& x.Uses == 1
&& clobber(x)
- => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ => (MOVHstore [i-1] {s} ptr (REV16W <typ.UInt16> w) mem)
(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <typ.UInt16> w) mem)
(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
&& x.Uses == 1
&& clobber(x)
- => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ => (MOVHstore [i-1] {s} ptr (REV16W <typ.UInt16> w) mem)
(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <typ.UInt16> w) mem)
// FP simplification
(FNEGS (FMULS x y)) => (FNMULS x y)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 341fcc2f07..f0853089de 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -10498,7 +10498,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
// cond: x0.Uses == 1 && clobber(x0)
- // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
+ // result: (MOVWstore [i-1] {s} p (ROLWconst <typ.UInt16> [8] w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -10519,7 +10519,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
v0.AuxInt = int8ToAuxInt(8)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
@@ -10527,7 +10527,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
// cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
- // result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
+ // result: (MOVWstore [i] {s} p0 (ROLWconst <typ.UInt16> [8] w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -10546,7 +10546,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
v0.AuxInt = int8ToAuxInt(8)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
@@ -10554,7 +10554,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
- // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
+ // result: (MOVLstore [i-3] {s} p (BSWAPL <typ.UInt32> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -10599,14 +10599,14 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = int32ToAuxInt(i - 3)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
- // result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
+ // result: (MOVLstore [i] {s} p0 (BSWAPL <typ.UInt32> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -10645,14 +10645,14 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
- // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
+ // result: (MOVQstore [i-7] {s} p (BSWAPQ <typ.UInt64> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -10745,14 +10745,14 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = int32ToAuxInt(i - 7)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, typ.UInt64)
v0.AddArg(w)
v.AddArg3(p, v0, mem)
return true
}
// match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
- // result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
+ // result: (MOVQstore [i] {s} p0 (BSWAPQ <typ.UInt64> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -10831,7 +10831,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = int32ToAuxInt(i)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, typ.UInt64)
v0.AddArg(w)
v.AddArg3(p0, v0, mem)
return true
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index cd6ffa726f..d49a6bab89 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -7798,6 +7798,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
@@ -8468,7 +8469,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w) x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w) x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w) x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
- // result: (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+ // result: (MOVDstore [i-7] {s} ptr (REV <typ.UInt64> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -8561,14 +8562,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v.reset(OpARM64MOVDstore)
v.AuxInt = int32ToAuxInt(i - 7)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type)
+ v0 := b.NewValue0(x6.Pos, OpARM64REV, typ.UInt64)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVBstore [7] {s} p w x0:(MOVBstore [6] {s} p (SRLconst [8] w) x1:(MOVBstore [5] {s} p (SRLconst [16] w) x2:(MOVBstore [4] {s} p (SRLconst [24] w) x3:(MOVBstore [3] {s} p (SRLconst [32] w) x4:(MOVBstore [2] {s} p (SRLconst [40] w) x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w) x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)
- // result: (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+ // result: (MOVDstoreidx ptr0 idx0 (REV <typ.UInt64> w) mem)
for {
if auxIntToInt32(v.AuxInt) != 7 {
break
@@ -8667,7 +8668,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
continue
}
v.reset(OpARM64MOVDstoreidx)
- v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type)
+ v0 := b.NewValue0(x5.Pos, OpARM64REV, typ.UInt64)
v0.AddArg(w)
v.AddArg4(ptr0, idx0, v0, mem)
return true
@@ -8676,7 +8677,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
- // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <typ.UInt32> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -8721,14 +8722,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v.reset(OpARM64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 3)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, typ.UInt32)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
- // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <typ.UInt32> w) mem)
for {
if auxIntToInt32(v.AuxInt) != 3 {
break
@@ -8779,7 +8780,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
continue
}
v.reset(OpARM64MOVWstoreidx)
- v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, typ.UInt32)
v0.AddArg(w)
v.AddArg4(ptr0, idx0, v0, mem)
return true
@@ -8788,7 +8789,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w)) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
- // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <typ.UInt32> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -8845,14 +8846,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v.reset(OpARM64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 3)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, typ.UInt32)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w)) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
- // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <typ.UInt32> w) mem)
for {
if auxIntToInt32(v.AuxInt) != 3 {
break
@@ -8915,7 +8916,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
continue
}
v.reset(OpARM64MOVWstoreidx)
- v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, typ.UInt32)
v0.AddArg(w)
v.AddArg4(ptr0, idx0, v0, mem)
return true
@@ -8924,7 +8925,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
- // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <typ.UInt32> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -8969,14 +8970,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v.reset(OpARM64MOVWstore)
v.AuxInt = int32ToAuxInt(i - 3)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, typ.UInt32)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
- // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <typ.UInt32> w) mem)
for {
if auxIntToInt32(v.AuxInt) != 3 {
break
@@ -9027,7 +9028,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
continue
}
v.reset(OpARM64MOVWstoreidx)
- v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, typ.UInt32)
v0.AddArg(w)
v.AddArg4(ptr0, idx0, v0, mem)
return true
@@ -9036,7 +9037,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
- // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <typ.UInt16> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -9057,14 +9058,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v.reset(OpARM64MOVHstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
- // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <typ.UInt16> w) mem)
for {
if auxIntToInt32(v.AuxInt) != 1 {
break
@@ -9092,7 +9093,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
continue
}
v.reset(OpARM64MOVHstoreidx)
- v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg4(ptr0, idx0, v0, mem)
return true
@@ -9101,7 +9102,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
// cond: x.Uses == 1 && clobber(x)
- // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <typ.UInt16> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -9122,14 +9123,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v.reset(OpARM64MOVHstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
- // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <typ.UInt16> w) mem)
for {
if auxIntToInt32(v.AuxInt) != 1 {
break
@@ -9157,7 +9158,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
continue
}
v.reset(OpARM64MOVHstoreidx)
- v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg4(ptr0, idx0, v0, mem)
return true
@@ -9166,7 +9167,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
// cond: x.Uses == 1 && clobber(x)
- // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <typ.UInt16> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -9191,14 +9192,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v.reset(OpARM64MOVHstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
- // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <typ.UInt16> w) mem)
for {
if auxIntToInt32(v.AuxInt) != 1 {
break
@@ -9230,7 +9231,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
continue
}
v.reset(OpARM64MOVHstoreidx)
- v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg4(ptr0, idx0, v0, mem)
return true
@@ -9239,7 +9240,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
}
// match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
// cond: x.Uses == 1 && clobber(x)
- // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <typ.UInt16> w) mem)
for {
i := auxIntToInt32(v.AuxInt)
s := auxToSym(v.Aux)
@@ -9260,14 +9261,14 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
v.reset(OpARM64MOVHstore)
v.AuxInt = int32ToAuxInt(i - 1)
v.Aux = symToAux(s)
- v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
- // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <typ.UInt16> w) mem)
for {
if auxIntToInt32(v.AuxInt) != 1 {
break
@@ -9295,7 +9296,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
continue
}
v.reset(OpARM64MOVHstoreidx)
- v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg4(ptr0, idx0, v0, mem)
return true
@@ -9310,6 +9311,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ typ := &b.Func.Config.Types
// match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
// cond: is32Bit(c)
// result: (MOVBstore [int32(c)] ptr val mem)
@@ -9472,7 +9474,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
}
// match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
- // result: (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+ // result: (MOVWstoreidx ptr idx (REVW <typ.UInt32> w) mem)
for {
ptr := v_0
if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 3 {
@@ -9525,7 +9527,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
v.reset(OpARM64MOVWstoreidx)
- v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type)
+ v0 := b.NewValue0(v.Pos, OpARM64REVW, typ.UInt32)
v0.AddArg(w)
v.AddArg4(ptr, idx, v0, mem)
return true
@@ -9591,7 +9593,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
}
// match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
// cond: x.Uses == 1 && clobber(x)
- // result: (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+ // result: (MOVHstoreidx ptr idx (REV16W <typ.UInt16> w) mem)
for {
ptr := v_0
if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
@@ -9612,7 +9614,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
break
}
v.reset(OpARM64MOVHstoreidx)
- v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, typ.UInt16)
v0.AddArg(w)
v.AddArg4(ptr, idx, v0, mem)
return true
diff --git a/test/fixedbugs/issue59367.go b/test/fixedbugs/issue59367.go
new file mode 100644
index 0000000000..32a79e1208
--- /dev/null
+++ b/test/fixedbugs/issue59367.go
@@ -0,0 +1,80 @@
+// run
+
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+ var b [8]byte
+ one := uint8(1)
+ f16(&one, b[:2])
+ if b[1] != 1 {
+ println("2-byte value lost")
+ }
+ f32(&one, b[:4])
+ if b[3] != 1 {
+ println("4-byte value lost")
+ }
+ f64(&one, b[:8])
+ if b[7] != 1 {
+ println("8-byte value lost")
+ }
+}
+
+//go:noinline
+func f16(p *uint8, b []byte) {
+ _ = b[1] // bounds check
+ x := *p // load a byte
+ y := uint16(x) // zero extend to 16 bits
+ b[0] = byte(y >> 8) // compute ROLW
+ b[1] = byte(y)
+ nop() // spill/restore ROLW
+ b[0] = byte(y >> 8) // use ROLW
+ b[1] = byte(y)
+}
+
+//go:noinline
+func f32(p *uint8, b []byte) {
+ _ = b[3] // bounds check
+ x := *p // load a byte
+ y := uint32(x) // zero extend to 32 bits
+ b[0] = byte(y >> 24) // compute ROLL
+ b[1] = byte(y >> 16)
+ b[2] = byte(y >> 8)
+ b[3] = byte(y)
+ nop() // spill/restore ROLL
+ b[0] = byte(y >> 24) // use ROLL
+ b[1] = byte(y >> 16)
+ b[2] = byte(y >> 8)
+ b[3] = byte(y)
+}
+
+//go:noinline
+func f64(p *uint8, b []byte) {
+ _ = b[7] // bounds check
+ x := *p // load a byte
+ y := uint64(x) // zero extend to 64 bits
+ b[0] = byte(y >> 56) // compute ROLQ
+ b[1] = byte(y >> 48)
+ b[2] = byte(y >> 40)
+ b[3] = byte(y >> 32)
+ b[4] = byte(y >> 24)
+ b[5] = byte(y >> 16)
+ b[6] = byte(y >> 8)
+ b[7] = byte(y)
+ nop() // spill/restore ROLQ
+ b[0] = byte(y >> 56) // use ROLQ
+ b[1] = byte(y >> 48)
+ b[2] = byte(y >> 40)
+ b[3] = byte(y >> 32)
+ b[4] = byte(y >> 24)
+ b[5] = byte(y >> 16)
+ b[6] = byte(y >> 8)
+ b[7] = byte(y)
+}
+
+//go:noinline
+func nop() {
+}