author     Alberto Donizetti <alb.donizetti@gmail.com>   2021-03-05 11:22:13 +0100
committer  Alberto Donizetti <alb.donizetti@gmail.com>   2021-03-09 08:19:14 +0000
commit     b70a2bc9c612de35b765712bd689865f6a1716b6 (patch)
tree       7e538fba518ca7a8a706601214c5e700d7979979 /src/cmd/compile/internal/ssa/gen
parent     437d229e2ac4cda4265090375b94d74ca218a846 (diff)
download   go-b70a2bc9c612de35b765712bd689865f6a1716b6.tar.gz
           go-b70a2bc9c612de35b765712bd689865f6a1716b6.zip
cmd/compile: make ValAndOff.{Val,Off} return an int32
The ValAndOff type is a 64-bit integer holding a 32-bit value and a 32-bit
offset in each half, but for historical reasons its Val and Off methods
returned an int64. This was convenient when AuxInt was always an int64, but
now that AuxInts are typed we can return int32 from Val and Off and get rid
of several casts and now-unnecessary range checks.

This change:

- changes the Val and Off methods to return an int32 (from int64);
- adds Val64 and Off64 methods for convenience in the few remaining places
  (in the ssa.go files) where Val and Off are stored in int64 fields;
- deletes makeValAndOff64 and renames makeValAndOff32 to makeValAndOff;
- deletes a few ValAndOff methods that are now unused;
- removes several validOff/validValAndOff checks that will always return true.

Passes:

  GOARCH=amd64 gotip build -toolexec 'toolstash -cmp' -a std
  GOARCH=386   gotip build -toolexec 'toolstash -cmp' -a std
  GOARCH=s390x gotip build -toolexec 'toolstash -cmp' -a std

(the three GOARCHs with SSA rules files impacted by the change).

Change-Id: I2abbbf42188c798631b94d3a55ca44256f140be7
Reviewed-on: https://go-review.googlesource.com/c/go/+/299149
Trust: Alberto Donizetti <alb.donizetti@gmail.com>
Trust: Keith Randall <khr@golang.org>
Run-TryBot: Alberto Donizetti <alb.donizetti@gmail.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
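For reference, a condensed sketch of the ValAndOff API as it stands after this
change (simplified from the ssa package; unrelated methods elided):

package ssa

// A ValAndOff packs a 32-bit value and a 32-bit pointer offset into a
// single int64 so it can be carried in an AuxInt field: the high 32 bits
// hold the value, the low 32 bits hold the offset.
type ValAndOff int64

func makeValAndOff(val, off int32) ValAndOff {
	return ValAndOff(int64(val)<<32 + int64(uint32(off)))
}

// Val and Off now return int32, the actual width of what is stored.
func (x ValAndOff) Val() int32 { return int32(int64(x) >> 32) }
func (x ValAndOff) Off() int32 { return int32(x) }

// Val64 and Off64 remain for the few callers (in the ssa.go files)
// that still store the results in int64 fields.
func (x ValAndOff) Val64() int64 { return int64(x) >> 32 }
func (x ValAndOff) Off64() int64 { return int64(int32(x)) }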
Diffstat (limited to 'src/cmd/compile/internal/ssa/gen')
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386.rules             75
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386splitload.rules     6
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules           95
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64splitload.rules  32
-rw-r--r--  src/cmd/compile/internal/ssa/gen/S390X.rules           40
5 files changed, 123 insertions, 125 deletions
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index d6d122dc78..199b73c42f 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -258,17 +258,17 @@
(Zero [4] destptr mem) => (MOVLstoreconst [0] destptr mem)
(Zero [3] destptr mem) =>
- (MOVBstoreconst [makeValAndOff32(0,2)] destptr
- (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [5] destptr mem) =>
- (MOVBstoreconst [makeValAndOff32(0,4)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [6] destptr mem) =>
- (MOVWstoreconst [makeValAndOff32(0,4)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVWstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [7] destptr mem) =>
- (MOVLstoreconst [makeValAndOff32(0,3)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVLstoreconst [makeValAndOff(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
// Strip off any fractional word zeroing.
(Zero [s] destptr mem) && s%4 != 0 && s > 4 =>
@@ -277,17 +277,17 @@
// Zero small numbers of words directly.
(Zero [8] destptr mem) =>
- (MOVLstoreconst [makeValAndOff32(0,4)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [12] destptr mem) =>
- (MOVLstoreconst [makeValAndOff32(0,8)] destptr
- (MOVLstoreconst [makeValAndOff32(0,4)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)))
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))
(Zero [16] destptr mem) =>
- (MOVLstoreconst [makeValAndOff32(0,12)] destptr
- (MOVLstoreconst [makeValAndOff32(0,8)] destptr
- (MOVLstoreconst [makeValAndOff32(0,4)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))))
+ (MOVLstoreconst [makeValAndOff(0,12)] destptr
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))))
// Medium zeroing uses a duff device.
(Zero [s] destptr mem)
@@ -621,12 +621,12 @@
((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
// Fold constants into stores.
-(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
- (MOVLstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
- (MOVWstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
-(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
- (MOVBstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
// Fold address offsets into constant stores.
(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
@@ -676,8 +676,8 @@
(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
- && y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off)) =>
- ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
+ && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
// fold LEALs together
(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
@@ -995,49 +995,49 @@
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
- => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
- => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
(MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
- => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
(MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 1)
&& clobber(x)
- => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1
&& a.Off() + 2 == c.Off()
&& clobber(x)
- => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
&& clobber(x)
- => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
(MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 2)
&& clobber(x)
- => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem)
(MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
&& x.Uses == 1
&& a.Off() == c.Off()
&& sequentialAddresses(p0, p1, 2)
&& clobber(x)
- => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem)
// Combine stores into larger (unaligned) stores.
(MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
@@ -1099,13 +1099,12 @@
(CMP(L|W|B)const l:(MOV(L|W|B)load {sym} [off] ptr mem) [c])
&& l.Uses == 1
- && validValAndOff(int64(c), int64(off))
&& clobber(l) =>
- @l.Block (CMP(L|W|B)constload {sym} [makeValAndOff32(int32(c),int32(off))] ptr mem)
+ @l.Block (CMP(L|W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
-(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
-(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
-(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
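The MOV*storeconst merging rules above combine two adjacent constant stores
into one wider store; the merged value is simply the two constants packed
little-endian. A standalone toy (not compiler code) showing the arithmetic:

package main

import "fmt"

func main() {
	// Two byte stores: value a at offset n, value c at offset n+1.
	// Merged into one 16-bit store, a lands in the low byte and c in
	// the high byte (little-endian), exactly the expression
	// a.Val()&0xff | c.Val()<<8 used in the rules.
	var a, c int32 = 0x12, 0x34
	fmt.Printf("%#x\n", a&0xff|c<<8) // 0x3412
	// The 16-bit -> 32-bit case is analogous: a&0xffff | c<<16.
	fmt.Printf("%#x\n", a&0xffff|c<<16) // 0x340012
}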
diff --git a/src/cmd/compile/internal/ssa/gen/386splitload.rules b/src/cmd/compile/internal/ssa/gen/386splitload.rules
index ed93b90b73..29d4f8c227 100644
--- a/src/cmd/compile/internal/ssa/gen/386splitload.rules
+++ b/src/cmd/compile/internal/ssa/gen/386splitload.rules
@@ -6,6 +6,6 @@
(CMP(L|W|B)load {sym} [off] ptr x mem) => (CMP(L|W|B) (MOV(L|W|B)load {sym} [off] ptr mem) x)
-(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
-(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
-(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
+(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 7b03034bb7..c61b460a56 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -318,46 +318,46 @@
// Lowering Zero instructions
(Zero [0] _ mem) => mem
-(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff32(0,0)] destptr mem)
-(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)
-(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)
-(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)
+(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [3] destptr mem) =>
- (MOVBstoreconst [makeValAndOff32(0,2)] destptr
- (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [5] destptr mem) =>
- (MOVBstoreconst [makeValAndOff32(0,4)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [6] destptr mem) =>
- (MOVWstoreconst [makeValAndOff32(0,4)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVWstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [7] destptr mem) =>
- (MOVLstoreconst [makeValAndOff32(0,3)] destptr
- (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVLstoreconst [makeValAndOff(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
// Strip off any fractional word zeroing.
(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE =>
(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
- (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
// Zero small numbers of words directly.
(Zero [16] destptr mem) && !config.useSSE =>
- (MOVQstoreconst [makeValAndOff32(0,8)] destptr
- (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [24] destptr mem) && !config.useSSE =>
- (MOVQstoreconst [makeValAndOff32(0,16)] destptr
- (MOVQstoreconst [makeValAndOff32(0,8)] destptr
- (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)))
+ (MOVQstoreconst [makeValAndOff(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
(Zero [32] destptr mem) && !config.useSSE =>
- (MOVQstoreconst [makeValAndOff32(0,24)] destptr
- (MOVQstoreconst [makeValAndOff32(0,16)] destptr
- (MOVQstoreconst [makeValAndOff32(0,8)] destptr
- (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))))
+ (MOVQstoreconst [makeValAndOff(0,24)] destptr
+ (MOVQstoreconst [makeValAndOff(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE =>
- (MOVQstoreconst [makeValAndOff32(0,int32(s-8))] destptr
- (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
// Adjust zeros to be a multiple of 16 bytes.
(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE =>
@@ -366,7 +366,7 @@
(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE =>
(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
- (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [16] destptr mem) && config.useSSE =>
(MOVOstorezero destptr mem)
@@ -1122,13 +1122,13 @@
// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) =>
- (MOVQstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
- (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
- (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem)
+ (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
- (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
+ (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
// Fold address offsets into constant stores.
(MOV(Q|L|W|B)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
@@ -1868,32 +1868,32 @@
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
- => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
- => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1
&& a.Off() + 2 == c.Off()
&& clobber(x)
- => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
&& x.Uses == 1
&& a.Off() + 2 == c.Off()
&& clobber(x)
- => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
&& x.Uses == 1
&& a.Off() + 4 == c.Off()
&& clobber(x)
- => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
+ => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
(MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
&& x.Uses == 1
&& a.Off() + 4 == c.Off()
&& clobber(x)
- => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
+ => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
(MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
&& config.useSSE
&& x.Uses == 1
@@ -1901,7 +1901,7 @@
&& c.Val() == 0
&& c2.Val() == 0
&& clobber(x)
- => (MOVOstorezero [c2.Off32()] {s} p mem)
+ => (MOVOstorezero [c2.Off()] {s} p mem)
// Combine stores into larger (unaligned) stores. Little endian.
(MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
@@ -2120,11 +2120,11 @@
(MOVBQZX (MOVBQZX x)) => (MOVBQZX x)
(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) =>
- ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) =>
- ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
// float <-> int register moves, with no conversion.
// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
@@ -2184,23 +2184,22 @@
(CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
&& l.Uses == 1
&& clobber(l) =>
-@l.Block (CMP(Q|L)constload {sym} [makeValAndOff32(c,off)] ptr mem)
+@l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem)
(CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c])
&& l.Uses == 1
&& clobber(l) =>
-@l.Block (CMP(W|B)constload {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+@l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
-(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,int64(off)) => (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem)
-(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
-(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
-(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
+(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
&& l == l2
&& l.Uses == 2
- && validValAndOff(0, int64(off))
&& clobber(l) =>
- @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
+ @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem)
// Convert ANDload to MOVload when we can do the AND in a containing TEST op.
// Only do when it's within the same block, so we don't have flags live across basic block boundaries.
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules b/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules
index a50d509d0d..dd8f8ac4a1 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules
@@ -18,28 +18,28 @@
(CMP(Q|L|W|B)load {sym} [off] ptr x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)load {sym} [off] ptr mem) x)
-(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off32()] ptr mem) x)
+(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off()] ptr mem) x)
-(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
-(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
-(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
-(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
+(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
(CMP(Q|L|W|B)loadidx1 {sym} [off] ptr idx x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)loadidx1 {sym} [off] ptr idx mem) x)
(CMPQloadidx8 {sym} [off] ptr idx x mem) => (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
(CMPLloadidx4 {sym} [off] ptr idx x mem) => (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
(CMPWloadidx2 {sym} [off] ptr idx x mem) => (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
-(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off32()] ptr idx mem) x)
-(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) x)
-(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) x)
-(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) x)
+(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off()] ptr idx mem) x)
+(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x)
+(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x)
+(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x)
-(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
-(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
-(CMPWconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
-(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()])
+(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPWconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
+(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()])
-(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
-(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
-(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
+(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
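The splitload rules above expand a compare-against-memory op back into a load
plus a register-immediate compare. Both destination AuxInt fields are int32,
so vo.Off() and vo.Val() now slot in directly, which is why the Off32/Val32
helpers could be dropped. A runnable toy (hypothetical, not compiler code)
showing that the pack/unpack round trip preserves both halves:

package main

import "fmt"

type ValAndOff int64

func makeValAndOff(val, off int32) ValAndOff {
	return ValAndOff(int64(val)<<32 + int64(uint32(off)))
}

func (x ValAndOff) Val() int32 { return int32(int64(x) >> 32) }
func (x ValAndOff) Off() int32 { return int32(x) }

func main() {
	// e.g. (CMPLconstload {sym} [vo] ptr mem) =>
	//      (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
	vo := makeValAndOff(7, -16)     // negative offsets round-trip too
	fmt.Println(vo.Val(), vo.Off()) // 7 -16
}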
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index 1f75f78a71..0fdd231d71 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -386,13 +386,13 @@
// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
(Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) =>
- (MVC [makeValAndOff32(int32(s), 0)] dst src mem)
+ (MVC [makeValAndOff(int32(s), 0)] dst src mem)
(Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) =>
- (MVC [makeValAndOff32(int32(s)-256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem))
+ (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
(Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) =>
- (MVC [makeValAndOff32(int32(s)-512, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem)))
+ (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
(Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) =>
- (MVC [makeValAndOff32(int32(s)-768, 768)] dst src (MVC [makeValAndOff32(256, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem))))
+ (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
// Move more than 1024 bytes using a loop.
(Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) =>
@@ -405,20 +405,20 @@
(Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem)
(Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem)
(Zero [3] destptr mem) =>
- (MOVBstoreconst [makeValAndOff32(0,2)] destptr
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVHstoreconst [0] destptr mem))
(Zero [5] destptr mem) =>
- (MOVBstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [6] destptr mem) =>
- (MOVHstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVHstoreconst [makeValAndOff(0,4)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [7] destptr mem) =>
- (MOVWstoreconst [makeValAndOff32(0,3)] destptr
+ (MOVWstoreconst [makeValAndOff(0,3)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [s] destptr mem) && s > 0 && s <= 1024 =>
- (CLEAR [makeValAndOff32(int32(s), 0)] destptr mem)
+ (CLEAR [makeValAndOff(int32(s), 0)] destptr mem)
// Zero more than 1024 bytes using a loop.
(Zero [s] destptr mem) && s > 1024 =>
@@ -948,22 +948,22 @@
// Fold constants into stores.
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
- (MOVDstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
- (MOVWstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
- (MOVHstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem)
+ (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB =>
- (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
+ (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
// Fold address offsets into constant stores.
-(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) =>
+(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
(MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
-(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) =>
+(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
(MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
-(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) =>
+(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
(MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
-(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off()+int64(off)) =>
+(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) =>
(MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
// Merge address calculations into loads and stores.
@@ -1306,19 +1306,19 @@
&& x.Uses == 1
&& a.Off() + 1 == c.Off()
&& clobber(x)
- => (MOVHstoreconst [makeValAndOff32(c.Val32()&0xff | a.Val32()<<8, a.Off32())] {s} p mem)
+ => (MOVHstoreconst [makeValAndOff(c.Val()&0xff | a.Val()<<8, a.Off())] {s} p mem)
(MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
&& p.Op != OpSB
&& x.Uses == 1
&& a.Off() + 2 == c.Off()
&& clobber(x)
- => (MOVWstore [a.Off32()] {s} p (MOVDconst [int64(c.Val32()&0xffff | a.Val32()<<16)]) mem)
+ => (MOVWstore [a.Off()] {s} p (MOVDconst [int64(c.Val()&0xffff | a.Val()<<16)]) mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& p.Op != OpSB
&& x.Uses == 1
&& a.Off() + 4 == c.Off()
&& clobber(x)
- => (MOVDstore [a.Off32()] {s} p (MOVDconst [c.Val()&0xffffffff | a.Val()<<32]) mem)
+ => (MOVDstore [a.Off()] {s} p (MOVDconst [c.Val64()&0xffffffff | a.Val64()<<32]) mem)
// Combine stores into larger (unaligned) stores.
// It doesn't work on global data (based on SB) because stores with relative addressing