author    Keith Randall <khr@golang.org>  2017-08-28 12:57:52 -0700
committer Russ Cox <rsc@golang.org>       2017-10-25 20:23:57 +0000
commit    79996e4a1d33b7404ee076d7455ff8dcc7270250
tree      0e62307877640daf6477d9f065b89f10578b6a9f
parent    f36b12657c71753029aeefa5e0af3c1607ffe9bb
[release-branch.go1.9] cmd/compile: avoid generating large offsets
The assembler barfs on large offsets. Make sure that all the
instructions that need their offsets to fit in an int32:
 1) check on any rule that computes offsets for such instructions
 2) change their aux fields so the check builder checks it.

The assembler also silently misassembled offsets between 1<<31 and
1<<32. Add a check in the assembler to barf on those as well.

Fixes #21655

Change-Id: Iebf24bf10f9f37b3ea819ceb7d588251c0f46d7d
Reviewed-on: https://go-review.googlesource.com/59630
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-on: https://go-review.googlesource.com/70981
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules   140
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64Ops.go    17
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go           16
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go   362
-rw-r--r--  src/cmd/internal/obj/x86/asm6.go                 7
-rw-r--r--  test/fixedbugs/issue21655.go                    40
6 files changed, 414 insertions(+), 168 deletions(-)
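
For context, the new test (test/fixedbugs/issue21655.go, listed above but
not shown in this view) exercises constant-folded indexing whose byte
offset overflows int32. A minimal sketch in the same spirit — not the
test's verbatim contents:

package p

// The constant index contributes 8*(1<<30) = 1<<33 bytes to the load's
// displacement, which cannot fit in the int32 offset field of an x86-64
// addressing mode. Before this fix, the rewrite rules folded the constant
// into the instruction offset unconditionally.
func f(a []int64, i int64) int64 {
	return a[i+1<<30]
}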
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 1900f5e794..f1d53bc51f 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -1177,82 +1177,82 @@
(MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
// combine ADDQ into indexed loads and stores
-(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
-(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
-(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
-
-(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
-(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
-
-(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
-(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
-(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
-(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
-(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
-
-(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
-(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
-(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
-(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
-
-(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
+(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
+(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
+
+(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
+(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
+
+(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
+(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem)
+(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
+(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
+(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
+
+(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
+(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
+(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
+(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
+
+(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) ->
+(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) ->
(MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
(MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(2*c) ->
(MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
-(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
(MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(4*c) ->
(MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
-(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) ->
(MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) ->
+(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(8*c) ->
(MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
// fold LEAQs together
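
The guards added above rely on two helpers defined in
src/cmd/compile/internal/ssa/rewrite.go. Sketched here for reference
(the shapes are small, but consult the Go 1.9 source for the exact
definitions):

// is32Bit reports whether n can be represented as a signed 32-bit integer.
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}

// A ValAndOff packs a value (high 32 bits) and an offset (low 32 bits)
// into one int64 AuxInt.
type ValAndOff int64

func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
func (x ValAndOff) Off() int64 { return int64(int32(x)) }

// canAdd reports whether the offset can grow by off and still fit in 32 bits.
func (x ValAndOff) canAdd(off int64) bool {
	newoff := x.Off() + off
	return newoff == int64(int32(newoff))
}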
@@ -2301,22 +2301,22 @@
(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
-(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) ->
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
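
Note that the scaled-index rules test the scaled sum (c+2*d, c+4*d,
c+8*d), matching the displacement actually written into the instruction.
A small standalone example (values chosen for illustration) shows why
testing the unscaled c+d would be wrong:

package main

import "fmt"

// is32Bit mirrors the rewrite-rule guard.
func is32Bit(n int64) bool { return n == int64(int32(n)) }

func main() {
	c := int64(1<<31 - 8) // existing displacement, a valid int32
	d := int64(1)         // constant peeled off an ADDQconst on the index
	fmt.Println(is32Bit(c + d))   // true: the unscaled sum still fits
	fmt.Println(is32Bit(c + 8*d)) // false: an idx8 access scales d by 8 and overflows
}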
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index c51cbd2238..c984cbfb12 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -20,6 +20,7 @@ import "strings"
// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
// filled by sign-extending the used portion. Users of AuxInt which interpret
// AuxInt as unsigned (e.g. shifts) must be careful.
+// - All SymOff opcodes require their offset to fit in an int32.
// Suffixes encode the bit width of various instructions.
// Q (quad word) = 64 bit
@@ -189,17 +190,17 @@ func init() {
// binary ops
{name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1
{name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
- {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
{name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint
{name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
{name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
- {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
{name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
{name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
{name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
- {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
{name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
@@ -221,24 +222,24 @@ func init() {
{name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
{name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
- {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
{name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
{name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
{name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
- {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
{name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
{name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
{name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
- {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
{name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
{name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1
- {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint
+ {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
{name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
{name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint
{name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
@@ -255,7 +256,7 @@ func init() {
{name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
- {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int64"}, // (arg0 & auxint) compare to 0
+ {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
{name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
{name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
{name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index ae2dd5f550..763a1cbd4d 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -4824,7 +4824,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "ADDQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
clobberFlags: true,
asm: x86.AADDQ,
@@ -4886,7 +4886,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "SUBQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
@@ -4952,7 +4952,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "MULQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
@@ -5232,7 +5232,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "ANDQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
@@ -5298,7 +5298,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "ORQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
@@ -5364,7 +5364,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "XORQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
resultInArg0: true,
clobberFlags: true,
@@ -5440,7 +5440,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "CMPQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
asm: x86.ACMPQ,
reg: regInfo{
@@ -5598,7 +5598,7 @@ var opcodeTable = [...]opInfo{
},
{
name: "TESTQconst",
- auxType: auxInt64,
+ auxType: auxInt32,
argLen: 1,
asm: x86.ATESTQ,
reg: regInfo{
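
The remaining seven-line change to src/cmd/internal/obj/x86/asm6.go
(listed in the diffstat but not shown here) adds the assembler-side barf
for offsets in [1<<31, 1<<32), which were previously emitted as their
truncated int32 bit pattern. An illustrative model of that guard's logic,
not the verbatim asm6.go change:

package main

import "fmt"

// checkOffset models the assembler's requirement that a memory-operand
// displacement fit in a signed 32-bit field.
func checkOffset(v int64) error {
	if v != int64(int32(v)) {
		return fmt.Errorf("offset too large: %d", v)
	}
	return nil
}

func main() {
	fmt.Println(checkOffset(1<<31 - 1)) // <nil>: largest valid displacement
	fmt.Println(checkOffset(1 << 31))   // error: would wrap to -2147483648
}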
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index f9a94cac36..f2f4896410 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -4680,7 +4680,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
return true
}
// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
@@ -4694,7 +4694,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVBload)
@@ -4732,7 +4732,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -4746,6 +4746,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -4755,7 +4758,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
return true
}
// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -4769,6 +4772,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
d := v_1.AuxInt
ptr := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -4778,7 +4784,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
return true
}
// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -4792,6 +4798,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -4801,7 +4810,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
return true
}
// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -4815,6 +4824,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -5440,7 +5452,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
return true
}
// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
@@ -5455,7 +5467,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
base := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVBstore)
@@ -5679,7 +5691,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -5693,6 +5705,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -5702,7 +5717,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
return true
}
// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -5716,6 +5731,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVBstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -5766,7 +5784,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -5781,6 +5799,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -5791,7 +5812,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -5806,6 +5827,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -6837,7 +6861,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
return true
}
// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
@@ -6851,7 +6875,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVLload)
@@ -6939,7 +6963,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
return true
}
// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -6953,6 +6977,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -6962,7 +6989,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
return true
}
// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -6976,6 +7003,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
d := v_1.AuxInt
ptr := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -6985,7 +7015,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
return true
}
// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -6999,6 +7029,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -7008,7 +7041,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
return true
}
// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -7022,6 +7055,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -7034,7 +7070,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -7048,6 +7084,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = c + d
v.Aux = sym
@@ -7057,7 +7096,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
return true
}
// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+4*d)
// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -7071,6 +7110,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 4*d)) {
+ break
+ }
v.reset(OpAMD64MOVLloadidx4)
v.AuxInt = c + 4*d
v.Aux = sym
@@ -7390,7 +7432,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
@@ -7405,7 +7447,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
base := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVLstore)
@@ -7693,7 +7735,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
return true
}
// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -7707,6 +7749,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -7716,7 +7761,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
return true
}
// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -7730,6 +7775,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -7785,7 +7833,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -7799,6 +7847,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -7808,7 +7859,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
return true
}
// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(4*c)
// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -7822,6 +7873,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(4 * c)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconstidx4)
v.AuxInt = ValAndOff(x).add(4 * c)
v.Aux = sym
@@ -7903,7 +7957,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -7918,6 +7972,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -7928,7 +7985,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -7943,6 +8000,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -8063,7 +8123,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
b := v.Block
_ = b
// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -8078,6 +8138,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = c + d
v.Aux = sym
@@ -8088,7 +8151,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
return true
}
// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+4*d)
// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -8103,6 +8166,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 4*d)) {
+ break
+ }
v.reset(OpAMD64MOVLstoreidx4)
v.AuxInt = c + 4*d
v.Aux = sym
@@ -8543,7 +8609,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
return true
}
// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
@@ -8557,7 +8623,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVQload)
@@ -8645,7 +8711,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
return true
}
// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -8659,6 +8725,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -8668,7 +8737,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
return true
}
// match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -8682,6 +8751,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
d := v_1.AuxInt
ptr := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -8691,7 +8763,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
return true
}
// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -8705,6 +8777,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -8714,7 +8789,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
return true
}
// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -8728,6 +8803,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -8740,7 +8818,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -8754,6 +8832,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = c + d
v.Aux = sym
@@ -8763,7 +8844,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
return true
}
// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+8*d)
// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -8777,6 +8858,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 8*d)) {
+ break
+ }
v.reset(OpAMD64MOVQloadidx8)
v.AuxInt = c + 8*d
v.Aux = sym
@@ -8954,7 +9038,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
return true
}
// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
@@ -8969,7 +9053,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
base := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVQstore)
@@ -9215,7 +9299,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
return true
}
// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -9229,6 +9313,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -9238,7 +9325,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
return true
}
// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -9252,6 +9339,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -9264,7 +9354,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -9278,6 +9368,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -9287,7 +9380,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
return true
}
// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(8*c)
// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -9301,6 +9394,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(8 * c)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconstidx8)
v.AuxInt = ValAndOff(x).add(8 * c)
v.Aux = sym
@@ -9340,7 +9436,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -9355,6 +9451,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -9365,7 +9464,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -9380,6 +9479,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -9393,7 +9495,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -9408,6 +9510,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = c + d
v.Aux = sym
@@ -9418,7 +9523,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
return true
}
// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+8*d)
// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -9433,6 +9538,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 8*d)) {
+ break
+ }
v.reset(OpAMD64MOVQstoreidx8)
v.AuxInt = c + 8*d
v.Aux = sym
@@ -9605,7 +9713,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
return true
}
// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -9619,6 +9727,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -9628,7 +9739,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
return true
}
// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -9642,6 +9753,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -9654,7 +9768,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -9668,6 +9782,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = c + d
v.Aux = sym
@@ -9677,7 +9794,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
return true
}
// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+8*d)
// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -9691,6 +9808,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 8*d)) {
+ break
+ }
v.reset(OpAMD64MOVSDloadidx8)
v.AuxInt = c + 8*d
v.Aux = sym
@@ -9874,7 +9994,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -9889,6 +10009,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -9899,7 +10022,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -9914,6 +10037,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -9927,7 +10053,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -9942,6 +10068,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = c + d
v.Aux = sym
@@ -9952,7 +10081,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
return true
}
// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+8*d)
// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -9967,6 +10096,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 8*d)) {
+ break
+ }
v.reset(OpAMD64MOVSDstoreidx8)
v.AuxInt = c + 8*d
v.Aux = sym
@@ -10139,7 +10271,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
return true
}
// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -10153,6 +10285,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -10162,7 +10297,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
return true
}
// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -10176,6 +10311,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -10188,7 +10326,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -10202,6 +10340,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = c + d
v.Aux = sym
@@ -10211,7 +10352,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
return true
}
// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+4*d)
// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -10225,6 +10366,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 4*d)) {
+ break
+ }
v.reset(OpAMD64MOVSSloadidx4)
v.AuxInt = c + 4*d
v.Aux = sym
@@ -10408,7 +10552,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -10423,6 +10567,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -10433,7 +10580,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -10448,6 +10595,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -10461,7 +10611,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -10476,6 +10626,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = c + d
v.Aux = sym
@@ -10486,7 +10639,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
return true
}
// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+4*d)
// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -10501,6 +10654,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 4*d)) {
+ break
+ }
v.reset(OpAMD64MOVSSstoreidx4)
v.AuxInt = c + 4*d
v.Aux = sym
@@ -11028,7 +11184,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
return true
}
// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
@@ -11042,7 +11198,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
sym2 := v_0.Aux
base := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVWload)
@@ -11130,7 +11286,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
return true
}
// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -11144,6 +11300,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -11153,7 +11312,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
return true
}
// match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -11167,6 +11326,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
d := v_1.AuxInt
ptr := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -11176,7 +11338,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
return true
}
// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -11190,6 +11352,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -11199,7 +11364,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
return true
}
// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -11213,6 +11378,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -11225,7 +11393,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -11239,6 +11407,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = c + d
v.Aux = sym
@@ -11248,7 +11419,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
return true
}
// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond:
+ // cond: is32Bit(c+2*d)
// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
for {
c := v.AuxInt
@@ -11262,6 +11433,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
d := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(is32Bit(c + 2*d)) {
+ break
+ }
v.reset(OpAMD64MOVWloadidx2)
v.AuxInt = c + 2*d
v.Aux = sym
@@ -11581,7 +11755,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: canMergeSym(sym1, sym2)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
@@ -11596,7 +11770,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
base := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
v.reset(OpAMD64MOVWstore)
@@ -11873,7 +12047,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
return true
}
// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -11887,6 +12061,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -11896,7 +12073,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
return true
}
// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -11910,6 +12087,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconstidx1)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -11960,7 +12140,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(c)
// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -11974,6 +12154,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
ptr := v_0.Args[0]
idx := v.Args[1]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(c)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(c)
v.Aux = sym
@@ -11983,7 +12166,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
return true
}
// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
- // cond:
+ // cond: ValAndOff(x).canAdd(2*c)
// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
for {
x := v.AuxInt
@@ -11997,6 +12180,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
c := v_1.AuxInt
idx := v_1.Args[0]
mem := v.Args[2]
+ if !(ValAndOff(x).canAdd(2 * c)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconstidx2)
v.AuxInt = ValAndOff(x).add(2 * c)
v.Aux = sym
@@ -12075,7 +12261,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -12090,6 +12276,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -12100,7 +12289,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
return true
}
// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -12115,6 +12304,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreidx1)
v.AuxInt = c + d
v.Aux = sym
@@ -12235,7 +12427,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
- // cond:
+ // cond: is32Bit(c+d)
// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -12250,6 +12442,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
idx := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + d)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = c + d
v.Aux = sym
@@ -12260,7 +12455,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
return true
}
// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
- // cond:
+ // cond: is32Bit(c+2*d)
// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
for {
c := v.AuxInt
@@ -12275,6 +12470,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
+ if !(is32Bit(c + 2*d)) {
+ break
+ }
v.reset(OpAMD64MOVWstoreidx2)
v.AuxInt = c + 2*d
v.Aux = sym
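
The generated rules above lean on two guard helpers, is32Bit and ValAndOff.canAdd. As an aid to reading the hunks, here is a minimal standalone sketch of both, approximating their definitions in cmd/compile/internal/ssa (rewrite.go and op.go); this commit does not touch these helpers, so treat the exact bodies as an assumption and consult the source for the authoritative versions.

package ssa // sketch only; not part of this commit

// is32Bit reports whether n fits in a signed 32-bit integer,
// i.e. truncating to int32 and sign-extending back is lossless.
// This is the check the new rule conditions apply to c+d, c+2*d, etc.
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}

// ValAndOff packs a 32-bit value and a 32-bit offset into a single
// int64 AuxInt, as used by the storeconst ops above.
type ValAndOff int64

// Off extracts the offset half (the low 32 bits, sign-extended).
func (x ValAndOff) Off() int64 { return int64(int32(x)) }

// canAdd reports whether adding off keeps the offset within 32 bits,
// which is what the MOVWstoreconstidx rules now require before
// folding an ADDQconst into the address.
func (x ValAndOff) canAdd(off int64) bool {
	newoff := x.Off() + off
	return newoff == int64(int32(newoff))
}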
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index bcf9318e2e..5f3a8c45d5 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -2269,6 +2269,13 @@ func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
return Yxxx
}
if ctxt.Arch.Family == sys.AMD64 {
+ // Offset must fit in a 32-bit signed field (or fit in a 32-bit unsigned field
+ // where the sign extension doesn't matter).
+ // Note: The latter happens only in assembly, for example crypto/sha1/sha1block_amd64.s.
+ if !(a.Offset == int64(int32(a.Offset)) ||
+ a.Offset == int64(uint32(a.Offset)) && p.As == ALEAL) {
+ return Yxxx
+ }
switch a.Name {
case obj.NAME_EXTERN, obj.NAME_STATIC, obj.NAME_GOTREF:
// Global variables can't use index registers and their
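
To see which offsets the new oclass check accepts, the following throwaway program (not part of the patch; the helper names are invented for illustration) evaluates the two tests from the hunk above. Offsets in [1<<31, 1<<32) pass only the unsigned test, and per the patch they are tolerated only for LEAL, where hand-written assembly such as crypto/sha1/sha1block_amd64.s relies on 32-bit truncation; anything failing both tests now returns Yxxx instead of being silently misassembled.

package main

import "fmt"

// fitsSigned32 mirrors a.Offset == int64(int32(a.Offset)).
func fitsSigned32(off int64) bool { return off == int64(int32(off)) }

// fitsUnsigned32 mirrors a.Offset == int64(uint32(a.Offset)),
// the LEAL-only escape hatch.
func fitsUnsigned32(off int64) bool { return off == int64(uint32(off)) }

func main() {
	for _, off := range []int64{-1, 1 << 30, 1<<31 - 1, 1 << 31, 1<<32 - 1, 1 << 32} {
		fmt.Printf("%12d: signed32=%-5v unsigned32=%v\n",
			off, fitsSigned32(off), fitsUnsigned32(off))
	}
}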
diff --git a/test/fixedbugs/issue21655.go b/test/fixedbugs/issue21655.go
new file mode 100644
index 0000000000..4060c8ddbb
--- /dev/null
+++ b/test/fixedbugs/issue21655.go
@@ -0,0 +1,40 @@
+// compile
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure assembly offsets don't get too large.
+
+// To trigger issue21655, the index offset needs to be small
+// enough to fit into an int32 (to get rewritten to an ADDQconst)
+// but large enough to overflow an int32 after multiplying by the stride.
+
+package main
+
+func f1(a []int64, i int64) int64 {
+ return a[i+1<<30]
+}
+func f2(a []int32, i int64) int32 {
+ return a[i+1<<30]
+}
+func f3(a []int16, i int64) int16 {
+ return a[i+1<<30]
+}
+func f4(a []int8, i int64) int8 {
+ return a[i+1<<31]
+}
+func f5(a []float64, i int64) float64 {
+ return a[i+1<<30]
+}
+func f6(a []float32, i int64) float32 {
+ return a[i+1<<30]
+}
+
+// Note: Before the fix for issue 21655, f{1,2,5,6} made
+// the compiler crash. f3 silently generated the wrong
+// code, using an offset of -1<<31 instead of 1<<31.
+// (This is due to the assembler accepting offsets
+// like 0x80000000 and silently using them as
+// signed 32 bit offsets.)
+// f4 was ok, but testing it can't hurt.
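
As a closing aside, a hedged sketch of the offset arithmetic behind f1 through f6 (this program is illustrative only and is not part of the test): the constant index fits in an int32, so the rewrite rules fold it into an ADDQconst, but once multiplied by the element stride the byte offset overflows an int32, which is precisely the case the new is32Bit conditions reject.

package main

import "fmt"

func main() {
	cases := []struct {
		fn     string
		index  int64 // constant folded into the index expression
		stride int64 // element size in bytes
	}{
		{"f1 []int64", 1 << 30, 8},
		{"f2 []int32", 1 << 30, 4},
		{"f3 []int16", 1 << 30, 2},
		{"f4 []int8", 1 << 31, 1},
		{"f5 []float64", 1 << 30, 8},
		{"f6 []float32", 1 << 30, 4},
	}
	for _, c := range cases {
		off := c.index * c.stride
		fmt.Printf("%-13s index fits int32: %-5v byte offset %#x fits int32: %v\n",
			c.fn, c.index == int64(int32(c.index)), off, off == int64(int32(off)))
	}
}

Note how the output matches the comment above: f3's byte offset is exactly 1<<31, which an int32 reinterprets as -1<<31, and f4's index constant already fails the 32-bit test, so the problematic rewrite never fired for it.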