From a5d1a9df812706708da1940898cf8cad8d5f9de2 Mon Sep 17 00:00:00 2001
From: "Bryan C. Mills"
Date: Fri, 27 Mar 2020 12:11:21 -0400
Subject: net/http: remove arbitrary timeouts from TestIdentityResponse and
TestTLSHandshakeTimeout
These hard-coded timeouts make the tests flaky on slow builders (such
as solaris-amd64-oraclerel), and make test failures harder to diagnose
anyway (by replacing dumps of the stuck goroutine stacks with failure
messages that do not describe the stuck goroutines). Eliminate them
and simplify the tests.
Fixes #37327
Fixes #38112
Change-Id: Id40febe349d134ef53c702e36199bfbf2b6468ff
Reviewed-on: https://go-review.googlesource.com/c/go/+/225977
Run-TryBot: Bryan C. Mills
TryBot-Result: Gobot Gobot
Reviewed-by: Brad Fitzpatrick
---
src/net/http/serve_test.go | 93 ++++++++++++++++++----------------------------
1 file changed, 37 insertions(+), 56 deletions(-)
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go
index 21ee7f33c8..9488821466 100644
--- a/src/net/http/serve_test.go
+++ b/src/net/http/serve_test.go
@@ -1057,16 +1057,13 @@ func TestIdentityResponse(t *testing.T) {
t.Fatalf("error writing: %v", err)
}
- // The ReadAll will hang for a failing test, so use a Timer to
- // fail explicitly.
- goTimeout(t, 2*time.Second, func() {
- got, _ := ioutil.ReadAll(conn)
- expectedSuffix := "\r\n\r\ntoo short"
- if !strings.HasSuffix(string(got), expectedSuffix) {
- t.Errorf("Expected output to end with %q; got response body %q",
- expectedSuffix, string(got))
- }
- })
+ // The ReadAll will hang for a failing test.
+ got, _ := ioutil.ReadAll(conn)
+ expectedSuffix := "\r\n\r\ntoo short"
+ if !strings.HasSuffix(string(got), expectedSuffix) {
+ t.Errorf("Expected output to end with %q; got response body %q",
+ expectedSuffix, string(got))
+ }
}
func testTCPConnectionCloses(t *testing.T, req string, h Handler) {
@@ -1438,13 +1435,13 @@ func TestTLSHandshakeTimeout(t *testing.T) {
t.Fatalf("Dial: %v", err)
}
defer conn.Close()
- goTimeout(t, 10*time.Second, func() {
- var buf [1]byte
- n, err := conn.Read(buf[:])
- if err == nil || n != 0 {
- t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
- }
- })
+
+ var buf [1]byte
+ n, err := conn.Read(buf[:])
+ if err == nil || n != 0 {
+ t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
+ }
+
select {
case v := <-errc:
if !strings.Contains(v, "timeout") && !strings.Contains(v, "TLS handshake") {
@@ -1479,30 +1476,29 @@ func TestTLSServer(t *testing.T) {
t.Fatalf("Dial: %v", err)
}
defer idleConn.Close()
- goTimeout(t, 10*time.Second, func() {
- if !strings.HasPrefix(ts.URL, "https://") {
- t.Errorf("expected test TLS server to start with https://, got %q", ts.URL)
- return
- }
- client := ts.Client()
- res, err := client.Get(ts.URL)
- if err != nil {
- t.Error(err)
- return
- }
- if res == nil {
- t.Errorf("got nil Response")
- return
- }
- defer res.Body.Close()
- if res.Header.Get("X-TLS-Set") != "true" {
- t.Errorf("expected X-TLS-Set response header")
- return
- }
- if res.Header.Get("X-TLS-HandshakeComplete") != "true" {
- t.Errorf("expected X-TLS-HandshakeComplete header")
- }
- })
+
+ if !strings.HasPrefix(ts.URL, "https://") {
+ t.Errorf("expected test TLS server to start with https://, got %q", ts.URL)
+ return
+ }
+ client := ts.Client()
+ res, err := client.Get(ts.URL)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if res == nil {
+ t.Errorf("got nil Response")
+ return
+ }
+ defer res.Body.Close()
+ if res.Header.Get("X-TLS-Set") != "true" {
+ t.Errorf("expected X-TLS-Set response header")
+ return
+ }
+ if res.Header.Get("X-TLS-HandshakeComplete") != "true" {
+ t.Errorf("expected X-TLS-HandshakeComplete header")
+ }
}
func TestServeTLS(t *testing.T) {
@@ -3629,21 +3625,6 @@ func TestHeaderToWire(t *testing.T) {
}
}
-// goTimeout runs f, failing t if f takes more than ns to complete.
-func goTimeout(t *testing.T, d time.Duration, f func()) {
- ch := make(chan bool, 2)
- timer := time.AfterFunc(d, func() {
- t.Errorf("Timeout expired after %v", d)
- ch <- true
- })
- defer timer.Stop()
- go func() {
- defer func() { ch <- true }()
- f()
- }()
- <-ch
-}
-
type errorListener struct {
errs []error
}
--
cgit v1.2.3-54-g00ecf
From 4a8b9bd2646a5b297197ffd1c412ef6afebe5c0d Mon Sep 17 00:00:00 2001
From: Michael Pratt
Date: Thu, 26 Mar 2020 15:10:21 -0400
Subject: runtime/pprof: increment fake overflow record PC
gentraceback generates PCs which are usually following the CALL
instruction. For those that aren't, it fixes up the PCs so that
functions processing the output can unconditionally decrement the PC.
runtime_expandInlineFrames does this unconditional decrement when
looking up the function. However, the fake stack frame generated for
overflow records fails to meet the contract, and decrementing the PC
results in a PC in the previous function. If that function contains
inlined call, runtime_expandInlineFrames will not short-circuit and will
panic trying to look up a PC that doesn't exist.
Note that the added test does not fail at HEAD. It will only fail (with
a panic) if the function preceding lostProfileEvent contains inlined
function calls. At the moment (on linux/amd64), that is
runtime/pprof.addMaxRSS, which does not.
Fixes #38096
Change-Id: Iad0819f23c566011c920fd9a5b1254719228da0b
Reviewed-on: https://go-review.googlesource.com/c/go/+/225661
Reviewed-by: Hyang-Ah Hana Kim
Reviewed-by: Heschi Kreinick
Reviewed-by: Keith Randall
Run-TryBot: Michael Pratt
TryBot-Result: Gobot Gobot
---
src/runtime/pprof/pprof_test.go | 12 ++++++++++++
src/runtime/pprof/proto.go | 5 ++++-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 5bfc3b6134..83b3152d68 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -1171,6 +1171,18 @@ func TestTryAdd(t *testing.T) {
{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
},
+ }, {
+ name: "bug38096",
+ input: []uint64{
+ 3, 0, 500, // hz = 500. Must match the period.
+ // count (data[2]) == 0 && len(stk) == 1 is an overflow
+ // entry. The "stk" entry is actually the count.
+ 4, 0, 0, 4242,
+ },
+ wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
+ wantSamples: []*profile.Sample{
+ {Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
+ },
}, {
// If a function is called recursively then it must not be
// inlined in the caller.
diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go
index 416ace7ab2..bb63153a70 100644
--- a/src/runtime/pprof/proto.go
+++ b/src/runtime/pprof/proto.go
@@ -322,7 +322,10 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
// overflow record
count = uint64(stk[0])
stk = []uint64{
- uint64(funcPC(lostProfileEvent)),
+ // gentraceback guarantees that PCs in the
+ // stack can be unconditionally decremented and
+ // still be valid, so we must do the same.
+ uint64(funcPC(lostProfileEvent)+1),
}
}
b.m.lookup(stk, tag).count += int64(count)
--
cgit v1.2.3-54-g00ecf
From af7eafd1505f9e150aa9fc21cd3f19da42a30333 Mon Sep 17 00:00:00 2001
From: Keith Randall
Date: Tue, 24 Mar 2020 13:39:44 -0700
Subject: cmd/compile: convert 386 port to use addressing modes pass (take 2)
Retrying CL 222782, with a fix that will hopefully stop the random crashing.
The issue with the previous CL is that it does pointer arithmetic
in a way that may briefly generate an out-of-bounds pointer. If an
interrupt happens to occur in that state, the referenced object may
be collected incorrectly.
Suppose there was code that did s[x+c]. The previous CL had a rule
to the effect of ptr + (x + c) -> c + (ptr + x). But ptr+x is not
guaranteed to point to the same object as ptr. In contrast,
ptr+(x+c) is guaranteed to point to the same object as ptr, because
we would have already checked that x+c is in bounds.
For example, strconv.trim used to have this code:
MOVZX -0x1(BX)(DX*1), BP
CMPL $0x30, AL
After CL 222782, it had this code:
LEAL 0(BX)(DX*1), BP
CMPB $0x30, -0x1(BP)
An interrupt between those last two instructions could see BP pointing
outside the backing store of the slice involved.
It's really hard to actually demonstrate a bug. First, you need to
have an interrupt occur at exactly the right time. Then, there must
be no other pointers to the object in question. Since the interrupted
frame will be scanned conservatively, there can't even be a dead
pointer in another register or on the stack. (In the example above,
a bug can't happen because BX still holds the original pointer.)
Then, the object in question needs to be collected (or at least
scanned?) before the interrupted code continues.
This CL needs to handle load combining somewhat differently than CL 222782
because of the new restriction on arithmetic. That's the only real
difference (other than removing the bad rules) from that old CL.
This bug is also present in the amd64 rewrite rules, and we haven't
seen any crashing as a result. I will fix up that code similarly to
this one in a separate CL.
Update #37881
Change-Id: I5f0d584d9bef4696bfe89a61ef0a27c8d507329f
Reviewed-on: https://go-review.googlesource.com/c/go/+/225798
Run-TryBot: Keith Randall
TryBot-Result: Gobot Gobot
Reviewed-by: Cherry Zhang
---
src/cmd/compile/internal/ssa/addressingmodes.go | 83 +-
src/cmd/compile/internal/ssa/gen/386.rules | 242 +-
src/cmd/compile/internal/ssa/gen/AMD64.rules | 1 +
src/cmd/compile/internal/ssa/gen/generic.rules | 6 +-
src/cmd/compile/internal/ssa/gen/rulegen.go | 5 +
src/cmd/compile/internal/ssa/rewrite.go | 16 +
src/cmd/compile/internal/ssa/rewrite386.go | 6156 ++++-------------------
src/cmd/compile/internal/ssa/rewritegeneric.go | 8 +-
test/codegen/memops.go | 78 +-
9 files changed, 1270 insertions(+), 5325 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
index 8874b56a9b..2af8a4d1fc 100644
--- a/src/cmd/compile/internal/ssa/addressingmodes.go
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -11,8 +11,8 @@ func addressingModes(f *Func) {
default:
// Most architectures can't do this.
return
- case "amd64":
- // TODO: 386, s390x?
+ case "amd64", "386":
+ // TODO: s390x?
}
var tmp []*Value
@@ -21,7 +21,17 @@ func addressingModes(f *Func) {
if !combineFirst[v.Op] {
continue
}
- p := v.Args[0]
+ // All matched operations have the pointer in arg[0].
+ // All results have the pointer in arg[0] and the index in arg[1].
+ // *Except* for operations which update a register,
+ // which are marked with resultInArg0. Those have
+ // the pointer in arg[1], and the corresponding result op
+ // has the pointer in arg[1] and the index in arg[2].
+ ptrIndex := 0
+ if opcodeTable[v.Op].resultInArg0 {
+ ptrIndex = 1
+ }
+ p := v.Args[ptrIndex]
c, ok := combine[[2]Op{v.Op, p.Op}]
if !ok {
continue
@@ -71,10 +81,11 @@ func addressingModes(f *Func) {
f.Fatalf("unknown aux combining for %s and %s\n", v.Op, p.Op)
}
// Combine the operations.
- tmp = append(tmp[:0], v.Args[1:]...)
+ tmp = append(tmp[:0], v.Args[:ptrIndex]...)
+ tmp = append(tmp, p.Args...)
+ tmp = append(tmp, v.Args[ptrIndex+1:]...)
v.resetArgs()
v.Op = c
- v.AddArgs(p.Args...)
v.AddArgs(tmp...)
}
}
@@ -97,6 +108,7 @@ func init() {
// x.Args[0].Args + x.Args[1:]
// Additionally, the Aux/AuxInt from x.Args[0] is merged into x.
var combine = map[[2]Op]Op{
+ // amd64
[2]Op{OpAMD64MOVBload, OpAMD64ADDQ}: OpAMD64MOVBloadidx1,
[2]Op{OpAMD64MOVWload, OpAMD64ADDQ}: OpAMD64MOVWloadidx1,
[2]Op{OpAMD64MOVLload, OpAMD64ADDQ}: OpAMD64MOVLloadidx1,
@@ -150,5 +162,64 @@ var combine = map[[2]Op]Op{
[2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ1}: OpAMD64MOVQstoreconstidx1,
[2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ8}: OpAMD64MOVQstoreconstidx8,
- // TODO: 386
+ // 386
+ [2]Op{Op386MOVBload, Op386ADDL}: Op386MOVBloadidx1,
+ [2]Op{Op386MOVWload, Op386ADDL}: Op386MOVWloadidx1,
+ [2]Op{Op386MOVLload, Op386ADDL}: Op386MOVLloadidx1,
+ [2]Op{Op386MOVSSload, Op386ADDL}: Op386MOVSSloadidx1,
+ [2]Op{Op386MOVSDload, Op386ADDL}: Op386MOVSDloadidx1,
+
+ [2]Op{Op386MOVBstore, Op386ADDL}: Op386MOVBstoreidx1,
+ [2]Op{Op386MOVWstore, Op386ADDL}: Op386MOVWstoreidx1,
+ [2]Op{Op386MOVLstore, Op386ADDL}: Op386MOVLstoreidx1,
+ [2]Op{Op386MOVSSstore, Op386ADDL}: Op386MOVSSstoreidx1,
+ [2]Op{Op386MOVSDstore, Op386ADDL}: Op386MOVSDstoreidx1,
+
+ [2]Op{Op386MOVBstoreconst, Op386ADDL}: Op386MOVBstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386ADDL}: Op386MOVWstoreconstidx1,
+ [2]Op{Op386MOVLstoreconst, Op386ADDL}: Op386MOVLstoreconstidx1,
+
+ [2]Op{Op386MOVBload, Op386LEAL1}: Op386MOVBloadidx1,
+ [2]Op{Op386MOVWload, Op386LEAL1}: Op386MOVWloadidx1,
+ [2]Op{Op386MOVWload, Op386LEAL2}: Op386MOVWloadidx2,
+ [2]Op{Op386MOVLload, Op386LEAL1}: Op386MOVLloadidx1,
+ [2]Op{Op386MOVLload, Op386LEAL4}: Op386MOVLloadidx4,
+ [2]Op{Op386MOVSSload, Op386LEAL1}: Op386MOVSSloadidx1,
+ [2]Op{Op386MOVSSload, Op386LEAL4}: Op386MOVSSloadidx4,
+ [2]Op{Op386MOVSDload, Op386LEAL1}: Op386MOVSDloadidx1,
+ [2]Op{Op386MOVSDload, Op386LEAL8}: Op386MOVSDloadidx8,
+
+ [2]Op{Op386MOVBstore, Op386LEAL1}: Op386MOVBstoreidx1,
+ [2]Op{Op386MOVWstore, Op386LEAL1}: Op386MOVWstoreidx1,
+ [2]Op{Op386MOVWstore, Op386LEAL2}: Op386MOVWstoreidx2,
+ [2]Op{Op386MOVLstore, Op386LEAL1}: Op386MOVLstoreidx1,
+ [2]Op{Op386MOVLstore, Op386LEAL4}: Op386MOVLstoreidx4,
+ [2]Op{Op386MOVSSstore, Op386LEAL1}: Op386MOVSSstoreidx1,
+ [2]Op{Op386MOVSSstore, Op386LEAL4}: Op386MOVSSstoreidx4,
+ [2]Op{Op386MOVSDstore, Op386LEAL1}: Op386MOVSDstoreidx1,
+ [2]Op{Op386MOVSDstore, Op386LEAL8}: Op386MOVSDstoreidx8,
+
+ [2]Op{Op386MOVBstoreconst, Op386LEAL1}: Op386MOVBstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386LEAL1}: Op386MOVWstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386LEAL2}: Op386MOVWstoreconstidx2,
+ [2]Op{Op386MOVLstoreconst, Op386LEAL1}: Op386MOVLstoreconstidx1,
+ [2]Op{Op386MOVLstoreconst, Op386LEAL4}: Op386MOVLstoreconstidx4,
+
+ [2]Op{Op386ADDLload, Op386LEAL4}: Op386ADDLloadidx4,
+ [2]Op{Op386SUBLload, Op386LEAL4}: Op386SUBLloadidx4,
+ [2]Op{Op386MULLload, Op386LEAL4}: Op386MULLloadidx4,
+ [2]Op{Op386ANDLload, Op386LEAL4}: Op386ANDLloadidx4,
+ [2]Op{Op386ORLload, Op386LEAL4}: Op386ORLloadidx4,
+ [2]Op{Op386XORLload, Op386LEAL4}: Op386XORLloadidx4,
+
+ [2]Op{Op386ADDLmodify, Op386LEAL4}: Op386ADDLmodifyidx4,
+ [2]Op{Op386SUBLmodify, Op386LEAL4}: Op386SUBLmodifyidx4,
+ [2]Op{Op386ANDLmodify, Op386LEAL4}: Op386ANDLmodifyidx4,
+ [2]Op{Op386ORLmodify, Op386LEAL4}: Op386ORLmodifyidx4,
+ [2]Op{Op386XORLmodify, Op386LEAL4}: Op386XORLmodifyidx4,
+
+ [2]Op{Op386ADDLconstmodify, Op386LEAL4}: Op386ADDLconstmodifyidx4,
+ [2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4,
+ [2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4,
+ [2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4,
}
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index 64a6cbaf84..2c48994a5f 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -588,10 +588,6 @@
(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWLSXload [off] {sym} ptr mem)
(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem)
-(MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem)
-(MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem)
-(MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem)
-
// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBLZX x)
(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWLZX x)
@@ -611,34 +607,22 @@
// fold constants into memory operations
// Note that this is not always a good idea because if not all the uses of
-// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
-// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
+// the ADDLconst get eliminated, we still have to compute the ADDLconst and we now
+// have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one.
// Nevertheless, let's do it!
(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem)
(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem)
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
-((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) && is32Bit(off1+off2) ->
- ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {sym} val base idx mem)
-((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) && is32Bit(off1+off2*4) ->
- ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2*4] {sym} val base idx mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
-((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) && is32Bit(off1+off2) ->
- ((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2] {sym} base idx val mem)
-((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) && is32Bit(off1+off2*4) ->
- ((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2*4] {sym} base idx val mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
-((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) && ValAndOff(valoff1).canAdd(off2) ->
- ((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
-((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) && ValAndOff(valoff1).canAdd(off2*4) ->
- ((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
// Fold constants into stores.
(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
@@ -652,7 +636,7 @@
(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
(MOV(L|W|B)storeconst [ValAndOff(sc).add(off)] {s} ptr mem)
-// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
+// We need to fold LEAL into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
// Note: we turn off this merging for operations on globals when building
// position-independent code (when Flag_shared is set).
@@ -672,31 +656,9 @@
&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
(MOV(L|W|B)storeconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-// generating indexed loads and stores
-(MOV(B|W|L|SS|SD)load [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOV(B|W|L|SS|SD)loadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOV(L|SS)load [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOV(L|SS)loadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-
-(MOV(B|W|L|SS|SD)store [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOV(B|W|L|SS|SD)storeidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOV(L|SS)store [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOV(L|SS)storeidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-(MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
-((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
- && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
@@ -706,97 +668,20 @@
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
- && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- ((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
-((ADD|AND|OR|XOR)Lconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
- && ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
- ((ADD|AND|OR|XOR)Lconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
-
-(MOV(B|W|L|SS|SD)load [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOV(B|W|L|SS|SD)loadidx1 [off] {sym} ptr idx mem)
-(MOV(B|W|L|SS|SD)store [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOV(B|W|L|SS|SD)storeidx1 [off] {sym} ptr idx val mem)
-
-(MOV(B|W|L)storeconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-(MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
- (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-
-(MOV(B|W|L)storeconst [x] {sym} (ADDL ptr idx) mem) -> (MOV(B|W|L)storeconstidx1 [x] {sym} ptr idx mem)
-
-// combine SHLL into indexed loads and stores
-(MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
-(MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
-(MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
-(MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
-
-// combine ADDL into indexed loads and stores
-(MOV(B|W|L|SS|SD)loadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOV(B|W|L|SS|SD)loadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOV(L|SS)loadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOV(L|SS)loadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem)
-
-(MOV(B|W|L|SS|SD)storeidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOV(B|W|L|SS|SD)storeidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOV(L|SS)storeidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOV(L|SS)storeidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem)
-
-(MOV(B|W|L|SS|SD)loadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOV(B|W|L|SS|SD)loadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2 [int64(int32(c+2*d))] {sym} ptr idx mem)
-(MOV(L|SS)loadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOV(L|SS)loadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
-(MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [int64(int32(c+8*d))] {sym} ptr idx mem)
-
-(MOV(B|W|L|SS|SD)storeidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOV(B|W|L|SS|SD)storeidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2 [int64(int32(c+2*d))] {sym} ptr idx val mem)
-(MOV(L|SS)storeidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOV(L|SS)storeidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem)
// Merge load/store to op
((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
-((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) && canMergeLoadClobber(v, l, x) && clobber(l) ->
- ((ADD|AND|OR|XOR|SUB|MUL)Lloadidx4 x [off] {sym} ptr idx mem)
-((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
- && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) ->
((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
-(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|AND|OR|XOR)Lloadidx4 x [off] {sym} ptr idx mem) mem) && y.Uses==1 && clobber(y) ->
- ((ADD|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx x mem)
-(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|SUB|AND|OR|XOR)L l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) ->
- ((ADD|SUB|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx x mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
&& y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off) ->
((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
-(MOVLstoreidx4 {sym} [off] ptr idx y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
- && y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off) ->
- ((ADD|AND|OR|XOR)Lconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
-((ADD|AND|OR|XOR)Lmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) && validValAndOff(c,off) ->
- ((ADD|AND|OR|XOR)Lconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
-(SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) && validValAndOff(-c,off) ->
- (ADDLconstmodifyidx4 [makeValAndOff(-c,off)] {sym} ptr idx mem)
-
-(MOV(B|W|L)storeconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
- (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
- (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
- (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-
-(MOV(B|W|L)storeconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
- (MOV(B|W|L)storeconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-(MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
- (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
-(MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
- (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
// fold LEALs together
(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
@@ -826,6 +711,16 @@
(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+// LEAL[1248] into LEAL[1248]. Only some such merges are possible.
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(off1+2*off2) ->
+ (LEAL4 [off1+2*off2] {sym} x y)
+(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(off1+4*off2) ->
+ (LEAL8 [off1+4*off2] {sym} x y)
+
// Absorb InvertFlags into branches.
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
@@ -1039,6 +934,9 @@
// TEST %reg,%reg is shorter than CMP
(CMP(L|W|B)const x [0]) -> (TEST(L|W|B) x x)
+// Convert LEAL1 back to ADDL if we can
+(LEAL1 [0] {nil} x y) -> (ADDL x y)
+
// Combining byte loads into larger (unaligned) loads.
// There are many ways these combinations could occur. This is
// designed to match the way encoding/binary.LittleEndian does it.
@@ -1052,6 +950,16 @@
&& clobber(x0, x1, s0)
-> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+(ORL x0:(MOVBload [i] {s} p0 mem)
+ s0:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, s0)
+ -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+
(ORL o0:(ORL
x0:(MOVWload [i0] {s} p mem)
s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)))
@@ -1068,31 +976,21 @@
&& clobber(x0, x1, x2, s0, s1, o0)
-> @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem)
-(ORL x0:(MOVBloadidx1 [i0] {s} p idx mem)
- s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
- && i1==i0+1
- && x0.Uses == 1
- && x1.Uses == 1
- && s0.Uses == 1
- && mergePoint(b,x0,x1) != nil
- && clobber(x0, x1, s0)
- -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem)
-
(ORL o0:(ORL
- x0:(MOVWloadidx1 [i0] {s} p idx mem)
- s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))
- s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)))
- && i2 == i0+2
- && i3 == i0+3
+ x0:(MOVWload [i] {s} p0 mem)
+ s0:(SHLLconst [16] x1:(MOVBload [i] {s} p1 mem)))
+ s1:(SHLLconst [24] x2:(MOVBload [i] {s} p2 mem)))
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& s0.Uses == 1
&& s1.Uses == 1
&& o0.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && sequentialAddresses(p1, p2, 1)
&& mergePoint(b,x0,x1,x2) != nil
&& clobber(x0, x1, x2, s0, s1, o0)
- -> @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem)
+ -> @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p0 mem)
// Combine constant stores into larger (unaligned) stores.
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
@@ -1105,6 +1003,20 @@
&& ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
&& clobber(x)
-> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+
+(MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() == ValAndOff(c).Off()
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p0 mem)
+(MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() == ValAndOff(c).Off()
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p0 mem)
+
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
&& x.Uses == 1
&& ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
@@ -1116,22 +1028,18 @@
&& clobber(x)
-> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
-(MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
- && x.Uses == 1
- && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
- && clobber(x)
- -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
-(MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
+(MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
&& x.Uses == 1
- && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && ValAndOff(a).Off() == ValAndOff(c).Off()
+ && sequentialAddresses(p0, p1, 2)
&& clobber(x)
- -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
-
-(MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
+ -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p0 mem)
+(MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
&& x.Uses == 1
- && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && ValAndOff(a).Off() == ValAndOff(c).Off()
+ && sequentialAddresses(p0, p1, 2)
&& clobber(x)
- -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst [1] i) mem)
+ -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p0 mem)
// Combine stores into larger (unaligned) stores.
(MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
@@ -1146,44 +1054,42 @@
&& x.Uses == 1
&& clobber(x)
-> (MOVWstore [i-1] {s} p w0 mem)
-(MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
- && x.Uses == 1
- && clobber(x)
- -> (MOVLstore [i-2] {s} p w mem)
-(MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
- && x.Uses == 1
- && clobber(x)
- -> (MOVLstore [i-2] {s} p w0 mem)
-(MOVBstoreidx1 [i] {s} p idx (SHR(L|W)const [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+(MOVBstore [i] {s} p1 (SHR(W|L)const [8] w) x:(MOVBstore [i] {s} p0 w mem))
&& x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
&& clobber(x)
- -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
-(MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHR(L|W)const [8] w) mem))
+ -> (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHR(W|L)const [8] w) mem))
&& x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
&& clobber(x)
- -> (MOVWstoreidx1 [i] {s} p idx w mem)
-(MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
+ -> (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
&& x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
&& clobber(x)
- -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
-(MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
+ -> (MOVWstore [i] {s} p0 w0 mem)
+
+(MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
-(MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
+ -> (MOVLstore [i-2] {s} p w mem)
+(MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
+ -> (MOVLstore [i-2] {s} p w0 mem)
-(MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
+(MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
&& x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
&& clobber(x)
- -> (MOVLstoreidx1 [i-2] {s} p (SHLLconst [1] idx) w mem)
-(MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
+ -> (MOVLstore [i] {s} p0 w mem)
+(MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
&& x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
&& clobber(x)
- -> (MOVLstoreidx1 [i-2] {s} p (SHLLconst [1] idx) w0 mem)
+ -> (MOVLstore [i] {s} p0 w0 mem)
// For PIC, break floating-point constant loading into two instructions so we have
// a register to use for holding the address of the constant pool entry.
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 306847d28c..ca5962f249 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -1604,6 +1604,7 @@
// Move constants offsets from LEAQx up into load. This lets the above combining
// rules discover indexed load-combining instances.
+// TODO: remove! These rules are bad.
(MOV(B|W|L|Q)load [i0] {s0} l:(LEAQ1 [i1] {s1} x y) mem) && i1 != 0 && is32Bit(i0+i1)
-> (MOV(B|W|L|Q)load [i0+i1] {s0} (LEAQ1 [0] {s1} x y) mem)
(MOV(B|W|L|Q)load [i0] {s0} l:(LEAQ2 [i1] {s1} x y) mem) && i1 != 0 && is32Bit(i0+i1)
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 8ec22d86e7..8a3c8eeaab 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -917,7 +917,7 @@
(If (ConstBool [c]) yes no) && c == 0 -> (First no yes)
// Get rid of Convert ops for pointer arithmetic on unsafe.Pointer.
-(Convert (Add(64|32) (Convert ptr mem) off) mem) -> (Add(64|32) ptr off)
+(Convert (Add(64|32) (Convert ptr mem) off) mem) -> (AddPtr ptr off)
(Convert (Convert ptr mem) mem) -> ptr
// strength reduction of divide by a constant.
@@ -1780,6 +1780,10 @@
// is constant, which pushes constants to the outside
// of the expression. At that point, any constant-folding
// opportunities should be obvious.
+// Note: don't include AddPtr here! In order to maintain the
+// invariant that pointers must stay within the pointed-to object,
+// we can't pull part of a pointer computation above the AddPtr.
+// See issue 37881.
// x + (C + z) -> C + (x + z)
(Add64 (Add64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) -> (Add64 i (Add64 z x))
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 8e88d0b6a3..3caa06038a 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -715,6 +715,11 @@ func (w *bodyBase) add(node Statement) {
// declared reports if the body contains a Declare with the given name.
func (w *bodyBase) declared(name string) bool {
+ if name == "nil" {
+ // Treat "nil" as having already been declared.
+ // This lets us use nil to match an aux field.
+ return true
+ }
for _, s := range w.list {
if decl, ok := s.(*Declare); ok && decl.name == name {
return true
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index b3e7d34779..fc03f0d72c 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -1248,9 +1248,25 @@ func read64(sym interface{}, off int64, byteorder binary.ByteOrder) uint64 {
return byteorder.Uint64(buf)
}
+// sequentialAddresses reports whether it can prove that x + n == y.
+func sequentialAddresses(x, y *Value, n int64) bool {
+ if x.Op == Op386ADDL && y.Op == Op386LEAL1 && y.AuxInt == n && y.Aux == nil &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == Op386LEAL1 && y.Op == Op386LEAL1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ return false
+}
+
// same reports whether x and y are the same value.
// It checks to a maximum depth of d, so it may report
// a false negative.
+// TODO: remove when amd64 port is switched to using sequentialAddresses
func same(x, y *Value, depth int) bool {
if x == y {
return true
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 8b2da94c13..2a0a92bb83 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -18,16 +18,10 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386ADDLconst(v)
case Op386ADDLconstmodify:
return rewriteValue386_Op386ADDLconstmodify(v)
- case Op386ADDLconstmodifyidx4:
- return rewriteValue386_Op386ADDLconstmodifyidx4(v)
case Op386ADDLload:
return rewriteValue386_Op386ADDLload(v)
- case Op386ADDLloadidx4:
- return rewriteValue386_Op386ADDLloadidx4(v)
case Op386ADDLmodify:
return rewriteValue386_Op386ADDLmodify(v)
- case Op386ADDLmodifyidx4:
- return rewriteValue386_Op386ADDLmodifyidx4(v)
case Op386ADDSD:
return rewriteValue386_Op386ADDSD(v)
case Op386ADDSDload:
@@ -42,16 +36,10 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386ANDLconst(v)
case Op386ANDLconstmodify:
return rewriteValue386_Op386ANDLconstmodify(v)
- case Op386ANDLconstmodifyidx4:
- return rewriteValue386_Op386ANDLconstmodifyidx4(v)
case Op386ANDLload:
return rewriteValue386_Op386ANDLload(v)
- case Op386ANDLloadidx4:
- return rewriteValue386_Op386ANDLloadidx4(v)
case Op386ANDLmodify:
return rewriteValue386_Op386ANDLmodify(v)
- case Op386ANDLmodifyidx4:
- return rewriteValue386_Op386ANDLmodifyidx4(v)
case Op386CMPB:
return rewriteValue386_Op386CMPB(v)
case Op386CMPBconst:
@@ -96,62 +84,28 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386MOVBLZX(v)
case Op386MOVBload:
return rewriteValue386_Op386MOVBload(v)
- case Op386MOVBloadidx1:
- return rewriteValue386_Op386MOVBloadidx1(v)
case Op386MOVBstore:
return rewriteValue386_Op386MOVBstore(v)
case Op386MOVBstoreconst:
return rewriteValue386_Op386MOVBstoreconst(v)
- case Op386MOVBstoreconstidx1:
- return rewriteValue386_Op386MOVBstoreconstidx1(v)
- case Op386MOVBstoreidx1:
- return rewriteValue386_Op386MOVBstoreidx1(v)
case Op386MOVLload:
return rewriteValue386_Op386MOVLload(v)
- case Op386MOVLloadidx1:
- return rewriteValue386_Op386MOVLloadidx1(v)
- case Op386MOVLloadidx4:
- return rewriteValue386_Op386MOVLloadidx4(v)
case Op386MOVLstore:
return rewriteValue386_Op386MOVLstore(v)
case Op386MOVLstoreconst:
return rewriteValue386_Op386MOVLstoreconst(v)
- case Op386MOVLstoreconstidx1:
- return rewriteValue386_Op386MOVLstoreconstidx1(v)
- case Op386MOVLstoreconstidx4:
- return rewriteValue386_Op386MOVLstoreconstidx4(v)
- case Op386MOVLstoreidx1:
- return rewriteValue386_Op386MOVLstoreidx1(v)
- case Op386MOVLstoreidx4:
- return rewriteValue386_Op386MOVLstoreidx4(v)
case Op386MOVSDconst:
return rewriteValue386_Op386MOVSDconst(v)
case Op386MOVSDload:
return rewriteValue386_Op386MOVSDload(v)
- case Op386MOVSDloadidx1:
- return rewriteValue386_Op386MOVSDloadidx1(v)
- case Op386MOVSDloadidx8:
- return rewriteValue386_Op386MOVSDloadidx8(v)
case Op386MOVSDstore:
return rewriteValue386_Op386MOVSDstore(v)
- case Op386MOVSDstoreidx1:
- return rewriteValue386_Op386MOVSDstoreidx1(v)
- case Op386MOVSDstoreidx8:
- return rewriteValue386_Op386MOVSDstoreidx8(v)
case Op386MOVSSconst:
return rewriteValue386_Op386MOVSSconst(v)
case Op386MOVSSload:
return rewriteValue386_Op386MOVSSload(v)
- case Op386MOVSSloadidx1:
- return rewriteValue386_Op386MOVSSloadidx1(v)
- case Op386MOVSSloadidx4:
- return rewriteValue386_Op386MOVSSloadidx4(v)
case Op386MOVSSstore:
return rewriteValue386_Op386MOVSSstore(v)
- case Op386MOVSSstoreidx1:
- return rewriteValue386_Op386MOVSSstoreidx1(v)
- case Op386MOVSSstoreidx4:
- return rewriteValue386_Op386MOVSSstoreidx4(v)
case Op386MOVWLSX:
return rewriteValue386_Op386MOVWLSX(v)
case Op386MOVWLSXload:
@@ -160,30 +114,16 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386MOVWLZX(v)
case Op386MOVWload:
return rewriteValue386_Op386MOVWload(v)
- case Op386MOVWloadidx1:
- return rewriteValue386_Op386MOVWloadidx1(v)
- case Op386MOVWloadidx2:
- return rewriteValue386_Op386MOVWloadidx2(v)
case Op386MOVWstore:
return rewriteValue386_Op386MOVWstore(v)
case Op386MOVWstoreconst:
return rewriteValue386_Op386MOVWstoreconst(v)
- case Op386MOVWstoreconstidx1:
- return rewriteValue386_Op386MOVWstoreconstidx1(v)
- case Op386MOVWstoreconstidx2:
- return rewriteValue386_Op386MOVWstoreconstidx2(v)
- case Op386MOVWstoreidx1:
- return rewriteValue386_Op386MOVWstoreidx1(v)
- case Op386MOVWstoreidx2:
- return rewriteValue386_Op386MOVWstoreidx2(v)
case Op386MULL:
return rewriteValue386_Op386MULL(v)
case Op386MULLconst:
return rewriteValue386_Op386MULLconst(v)
case Op386MULLload:
return rewriteValue386_Op386MULLload(v)
- case Op386MULLloadidx4:
- return rewriteValue386_Op386MULLloadidx4(v)
case Op386MULSD:
return rewriteValue386_Op386MULSD(v)
case Op386MULSDload:
@@ -202,16 +142,10 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386ORLconst(v)
case Op386ORLconstmodify:
return rewriteValue386_Op386ORLconstmodify(v)
- case Op386ORLconstmodifyidx4:
- return rewriteValue386_Op386ORLconstmodifyidx4(v)
case Op386ORLload:
return rewriteValue386_Op386ORLload(v)
- case Op386ORLloadidx4:
- return rewriteValue386_Op386ORLloadidx4(v)
case Op386ORLmodify:
return rewriteValue386_Op386ORLmodify(v)
- case Op386ORLmodifyidx4:
- return rewriteValue386_Op386ORLmodifyidx4(v)
case Op386ROLBconst:
return rewriteValue386_Op386ROLBconst(v)
case Op386ROLLconst:
@@ -278,12 +212,8 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386SUBLconst(v)
case Op386SUBLload:
return rewriteValue386_Op386SUBLload(v)
- case Op386SUBLloadidx4:
- return rewriteValue386_Op386SUBLloadidx4(v)
case Op386SUBLmodify:
return rewriteValue386_Op386SUBLmodify(v)
- case Op386SUBLmodifyidx4:
- return rewriteValue386_Op386SUBLmodifyidx4(v)
case Op386SUBSD:
return rewriteValue386_Op386SUBSD(v)
case Op386SUBSDload:
@@ -298,16 +228,10 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386XORLconst(v)
case Op386XORLconstmodify:
return rewriteValue386_Op386XORLconstmodify(v)
- case Op386XORLconstmodifyidx4:
- return rewriteValue386_Op386XORLconstmodifyidx4(v)
case Op386XORLload:
return rewriteValue386_Op386XORLload(v)
- case Op386XORLloadidx4:
- return rewriteValue386_Op386XORLloadidx4(v)
case Op386XORLmodify:
return rewriteValue386_Op386XORLmodify(v)
- case Op386XORLmodifyidx4:
- return rewriteValue386_Op386XORLmodifyidx4(v)
case OpAdd16:
v.Op = Op386ADDL
return true
@@ -1042,32 +966,6 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
}
break
}
- // match: (ADDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
- // cond: canMergeLoadClobber(v, l, x) && clobber(l)
- // result: (ADDLloadidx4 x [off] {sym} ptr idx mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- l := v_1
- if l.Op != Op386MOVLloadidx4 {
- continue
- }
- off := l.AuxInt
- sym := l.Aux
- mem := l.Args[2]
- ptr := l.Args[0]
- idx := l.Args[1]
- if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
- continue
- }
- v.reset(Op386ADDLloadidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(x, ptr, idx, mem)
- return true
- }
- break
- }
// match: (ADDL x (NEGL y))
// result: (SUBL x y)
for {
@@ -1316,81 +1214,6 @@ func rewriteValue386_Op386ADDLconstmodify(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386ADDLconstmodifyidx4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ADDLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem)
- // cond: ValAndOff(valoff1).canAdd(off2)
- // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2)) {
- break
- }
- v.reset(Op386ADDLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = sym
- v.AddArg3(base, idx, mem)
- return true
- }
- // match: (ADDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
- // cond: ValAndOff(valoff1).canAdd(off2*4)
- // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2 * 4)) {
- break
- }
- v.reset(Op386ADDLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
- v.Aux = sym
- v.AddArg3(base, idx, mem)
- return true
- }
- // match: (ADDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- idx := v_1
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ADDLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(base, idx, mem)
- return true
- }
- return false
-}
func rewriteValue386_Op386ADDLload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -1442,109 +1265,6 @@ func rewriteValue386_Op386ADDLload(v *Value) bool {
v.AddArg3(val, base, mem)
return true
}
- // match: (ADDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ADDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL4 {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- idx := v_1.Args[1]
- ptr := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386ADDLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386ADDLloadidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ADDLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
- // cond: is32Bit(off1+off2)
- // result: (ADDLloadidx4 [off1+off2] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- idx := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386ADDLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (ADDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
- // cond: is32Bit(off1+off2*4)
- // result: (ADDLloadidx4 [off1+off2*4] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- base := v_1
- if v_2.Op != Op386ADDLconst {
- break
- }
- off2 := v_2.AuxInt
- idx := v_2.Args[0]
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386ADDLloadidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (ADDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- idx := v_2
- mem := v_3
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ADDLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, base, idx, mem)
- return true
- }
return false
}
func rewriteValue386_Op386ADDLmodify(v *Value) bool {
@@ -1600,107 +1320,6 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386ADDLmodifyidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ADDLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
- // cond: is32Bit(off1+off2)
- // result: (ADDLmodifyidx4 [off1+off2] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386ADDLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ADDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
- // cond: is32Bit(off1+off2*4)
- // result: (ADDLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386ADDLmodifyidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ADDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ADDLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ADDLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ADDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
- // cond: validValAndOff(c,off)
- // result: (ADDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- if v_2.Op != Op386MOVLconst {
- break
- }
- c := v_2.AuxInt
- mem := v_3
- if !(validValAndOff(c, off)) {
- break
- }
- v.reset(Op386ADDLconstmodifyidx4)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
func rewriteValue386_Op386ADDSD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -1915,32 +1534,6 @@ func rewriteValue386_Op386ANDL(v *Value) bool {
}
break
}
- // match: (ANDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
- // cond: canMergeLoadClobber(v, l, x) && clobber(l)
- // result: (ANDLloadidx4 x [off] {sym} ptr idx mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- l := v_1
- if l.Op != Op386MOVLloadidx4 {
- continue
- }
- off := l.AuxInt
- sym := l.Aux
- mem := l.Args[2]
- ptr := l.Args[0]
- idx := l.Args[1]
- if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
- continue
- }
- v.reset(Op386ANDLloadidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(x, ptr, idx, mem)
- return true
- }
- break
- }
// match: (ANDL x x)
// result: x
for {
@@ -2057,101 +1650,26 @@ func rewriteValue386_Op386ANDLconstmodify(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386ANDLconstmodifyidx4(v *Value) bool {
+func rewriteValue386_Op386ANDLload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
- // match: (ANDLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem)
- // cond: ValAndOff(valoff1).canAdd(off2)
- // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
+ // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (ANDLload [off1+off2] {sym} val base mem)
for {
- valoff1 := v.AuxInt
+ off1 := v.AuxInt
sym := v.Aux
- if v_0.Op != Op386ADDLconst {
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
break
}
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2)) {
- break
- }
- v.reset(Op386ANDLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = sym
- v.AddArg3(base, idx, mem)
- return true
- }
- // match: (ANDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
- // cond: ValAndOff(valoff1).canAdd(off2*4)
- // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2 * 4)) {
- break
- }
- v.reset(Op386ANDLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
- v.Aux = sym
- v.AddArg3(base, idx, mem)
- return true
- }
- // match: (ANDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- idx := v_1
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ANDLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(base, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386ANDLload(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (ANDLload [off1+off2] {sym} val base mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1 + off2)) {
+ if !(is32Bit(off1 + off2)) {
break
}
v.reset(Op386ANDLload)
@@ -2183,109 +1701,6 @@ func rewriteValue386_Op386ANDLload(v *Value) bool {
v.AddArg3(val, base, mem)
return true
}
- // match: (ANDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ANDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL4 {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- idx := v_1.Args[1]
- ptr := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386ANDLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386ANDLloadidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ANDLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
- // cond: is32Bit(off1+off2)
- // result: (ANDLloadidx4 [off1+off2] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- idx := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386ANDLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (ANDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
- // cond: is32Bit(off1+off2*4)
- // result: (ANDLloadidx4 [off1+off2*4] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- base := v_1
- if v_2.Op != Op386ADDLconst {
- break
- }
- off2 := v_2.AuxInt
- idx := v_2.Args[0]
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386ANDLloadidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (ANDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ANDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- idx := v_2
- mem := v_3
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ANDLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, base, idx, mem)
- return true
- }
return false
}
func rewriteValue386_Op386ANDLmodify(v *Value) bool {
@@ -2341,107 +1756,6 @@ func rewriteValue386_Op386ANDLmodify(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386ANDLmodifyidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ANDLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
- // cond: is32Bit(off1+off2)
- // result: (ANDLmodifyidx4 [off1+off2] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386ANDLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ANDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
- // cond: is32Bit(off1+off2*4)
- // result: (ANDLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386ANDLmodifyidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ANDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ANDLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ANDLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ANDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
- // cond: validValAndOff(c,off)
- // result: (ANDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- if v_2.Op != Op386MOVLconst {
- break
- }
- c := v_2.AuxInt
- mem := v_3
- if !(validValAndOff(c, off)) {
- break
- }
- v.reset(Op386ANDLconstmodifyidx4)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
func rewriteValue386_Op386CMPB(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -3768,6 +3082,76 @@ func rewriteValue386_Op386LEAL1(v *Value) bool {
}
break
}
+ // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ continue
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ continue
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (LEAL1 [0] {nil} x y)
+ // result: (ADDL x y)
+ for {
+ if v.AuxInt != 0 || v.Aux != nil {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(Op386ADDL)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValue386_Op386LEAL2(v *Value) bool {
@@ -3869,8 +3253,32 @@ func rewriteValue386_Op386LEAL2(v *Value) bool {
v.AddArg2(x, y)
return true
}
- return false
-}
+ // match: (LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y))
+ // cond: is32Bit(off1+2*off2)
+ // result: (LEAL4 [off1+2*off2] {sym} x y)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_1.AuxInt
+ if v_1.Aux != nil {
+ break
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(off1 + 2*off2)) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = off1 + 2*off2
+ v.Aux = sym
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386LEAL4(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -3954,6 +3362,30 @@ func rewriteValue386_Op386LEAL4(v *Value) bool {
v.AddArg2(x, y)
return true
}
+ // match: (LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y))
+ // cond: is32Bit(off1+4*off2)
+ // result: (LEAL8 [off1+4*off2] {sym} x y)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_1.AuxInt
+ if v_1.Aux != nil {
+ break
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(off1 + 4*off2)) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = off1 + 4*off2
+ v.Aux = sym
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValue386_Op386LEAL8(v *Value) bool {
@@ -4146,30 +3578,6 @@ func rewriteValue386_Op386MOVBLZX(v *Value) bool {
v0.AddArg2(ptr, mem)
return true
}
- // match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem)
- for {
- x := v_0
- if x.Op != Op386MOVBloadidx1 {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- mem := x.Args[2]
- ptr := x.Args[0]
- idx := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(v.Pos, Op386MOVBloadidx1, v.Type)
- v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg3(ptr, idx, mem)
- return true
- }
// match: (MOVBLZX (ANDLconst [c] x))
// result: (ANDLconst [c & 0xff] x)
for {
@@ -4254,56 +3662,6 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
v.AddArg2(base, mem)
return true
}
- // match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVBloadidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVBload [off] {sym} (ADDL ptr idx) mem)
- // cond: ptr.Op != OpSB
- // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- mem := v_1
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVBloadidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
// match: (MOVBload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVLconst [int64(read8(sym, off))])
@@ -4319,54 +3677,6 @@ func rewriteValue386_Op386MOVBload(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386MOVBloadidx1(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != Op386ADDLconst {
- continue
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVBloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- // match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- continue
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVBloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- return false
-}
func rewriteValue386_Op386MOVBstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -4473,58 +3783,6 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.AddArg3(base, val, mem)
return true
}
- // match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVBstoreidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem)
- // cond: ptr.Op != OpSB
- // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- val := v_1
- mem := v_2
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVBstoreidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- break
- }
// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)
@@ -4657,6 +3915,134 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool {
v.AddArg3(p, w0, mem)
return true
}
+ // match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p1 := v_0
+ if v_1.Op != Op386SHRWconst || v_1.AuxInt != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || x.AuxInt != i || x.Aux != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst || v_1.AuxInt != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || x.AuxInt != i || x.Aux != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || x.AuxInt != i || x.Aux != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRWconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || x.AuxInt != i || x.Aux != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := v_1.AuxInt
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || x.AuxInt != i || x.Aux != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
return false
}
func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
@@ -4707,55 +4093,15 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
v.AddArg2(ptr, mem)
return true
}
- // match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
for {
- x := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVBstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(off)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem)
- // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
- for {
- x := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- v.reset(Op386MOVBstoreconstidx1)
- v.AuxInt = x
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
- for {
- c := v.AuxInt
- s := v.Aux
- p := v_0
- x := v_1
- if x.Op != Op386MOVBstoreconst {
+ c := v.AuxInt
+ s := v.Aux
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
break
}
a := x.AuxInt
@@ -4797,296 +4143,57 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
v.AddArg2(p, mem)
return true
}
- return false
-}
-func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
- // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
- for {
- x := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- c := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVBstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(c)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
- // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
- for {
- x := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- c := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVBstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(c)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
+ // match: (MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() == ValAndOff(c).Off() && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p0 mem)
for {
c := v.AuxInt
s := v.Aux
- p := v_0
- i := v_1
- x := v_2
- if x.Op != Op386MOVBstoreconstidx1 {
+ p1 := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
break
}
a := x.AuxInt
if x.Aux != s {
break
}
- mem := x.Args[2]
- if p != x.Args[0] || i != x.Args[1] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+ mem := x.Args[1]
+ p0 := x.Args[0]
+ if !(x.Uses == 1 && ValAndOff(a).Off() == ValAndOff(c).Off() && sequentialAddresses(p0, p1, 1) && clobber(x)) {
break
}
- v.reset(Op386MOVWstoreconstidx1)
+ v.reset(Op386MOVWstoreconst)
v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
v.Aux = s
- v.AddArg3(p, i, mem)
+ v.AddArg2(p0, mem)
return true
}
- return false
-}
-func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != Op386ADDLconst {
- continue
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVBstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- break
- }
- // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- continue
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVBstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- break
- }
- // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
- for {
- i := v.AuxInt
- s := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- p := v_0
- idx := v_1
- if v_2.Op != Op386SHRLconst || v_2.AuxInt != 8 {
- continue
- }
- w := v_2.Args[0]
- x := v_3
- if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s {
- continue
- }
- mem := x.Args[3]
- x_0 := x.Args[0]
- x_1 := x.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 {
- if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
- continue
- }
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = i - 1
- v.Aux = s
- v.AddArg4(p, idx, w, mem)
- return true
- }
- }
- break
- }
- // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
- for {
- i := v.AuxInt
- s := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- p := v_0
- idx := v_1
- if v_2.Op != Op386SHRWconst || v_2.AuxInt != 8 {
- continue
- }
- w := v_2.Args[0]
- x := v_3
- if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s {
- continue
- }
- mem := x.Args[3]
- x_0 := x.Args[0]
- x_1 := x.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 {
- if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
- continue
- }
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = i - 1
- v.Aux = s
- v.AddArg4(p, idx, w, mem)
- return true
- }
- }
- break
- }
- // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRLconst [8] w) mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVWstoreidx1 [i] {s} p idx w mem)
+ // match: (MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() == ValAndOff(c).Off() && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p0 mem)
for {
- i := v.AuxInt
+ a := v.AuxInt
s := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- p := v_0
- idx := v_1
- w := v_2
- x := v_3
- if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s {
- continue
- }
- mem := x.Args[3]
- x_0 := x.Args[0]
- x_1 := x.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 {
- if p != x_0 || idx != x_1 {
- continue
- }
- x_2 := x.Args[2]
- if x_2.Op != Op386SHRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
- continue
- }
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = i
- v.Aux = s
- v.AddArg4(p, idx, w, mem)
- return true
- }
+ p0 := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
}
- break
- }
- // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRWconst [8] w) mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVWstoreidx1 [i] {s} p idx w mem)
- for {
- i := v.AuxInt
- s := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- p := v_0
- idx := v_1
- w := v_2
- x := v_3
- if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i+1 || x.Aux != s {
- continue
- }
- mem := x.Args[3]
- x_0 := x.Args[0]
- x_1 := x.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 {
- if p != x_0 || idx != x_1 {
- continue
- }
- x_2 := x.Args[2]
- if x_2.Op != Op386SHRWconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
- continue
- }
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = i
- v.Aux = s
- v.AddArg4(p, idx, w, mem)
- return true
- }
+ c := x.AuxInt
+ if x.Aux != s {
+ break
}
- break
- }
- // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
- for {
- i := v.AuxInt
- s := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- p := v_0
- idx := v_1
- if v_2.Op != Op386SHRLconst {
- continue
- }
- j := v_2.AuxInt
- w := v_2.Args[0]
- x := v_3
- if x.Op != Op386MOVBstoreidx1 || x.AuxInt != i-1 || x.Aux != s {
- continue
- }
- mem := x.Args[3]
- x_0 := x.Args[0]
- x_1 := x.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 {
- if p != x_0 || idx != x_1 {
- continue
- }
- w0 := x.Args[2]
- if w0.Op != Op386SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
- continue
- }
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = i - 1
- v.Aux = s
- v.AddArg4(p, idx, w0, mem)
- return true
- }
+ mem := x.Args[1]
+ p1 := x.Args[0]
+ if !(x.Uses == 1 && ValAndOff(a).Off() == ValAndOff(c).Off() && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
}
- break
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg2(p0, mem)
+ return true
}
return false
}
@@ -5158,252 +4265,69 @@ func rewriteValue386_Op386MOVLload(v *Value) bool {
v.AddArg2(base, mem)
return true
}
- // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))])
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ off := v.AuxInt
+ sym := v.Aux
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
- v.reset(Op386MOVLloadidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))
return true
}
- // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ return false
+}
+func rewriteValue386_Op386MOVLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL4 {
+ sym := v.Aux
+ if v_0.Op != Op386ADDLconst {
break
}
off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ val := v_1
+ mem := v_2
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386MOVLloadidx4)
+ v.reset(Op386MOVLstore)
v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
+ v.Aux = sym
+ v.AddArg3(ptr, val, mem)
return true
}
- // match: (MOVLload [off] {sym} (ADDL ptr idx) mem)
- // cond: ptr.Op != OpSB
- // result: (MOVLloadidx1 [off] {sym} ptr idx mem)
+ // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
- if v_0.Op != Op386ADDL {
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
break
}
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- mem := v_1
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVLloadidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
+ c := v_1.AuxInt
+ mem := v_2
+ if !(validOff(off)) {
+ break
}
- break
- }
- // match: (MOVLload [off] {sym} (SB) _)
- // cond: symIsRO(sym)
- // result: (MOVLconst [int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))])
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != OpSB || !(symIsRO(sym)) {
- break
- }
- v.reset(Op386MOVLconst)
- v.AuxInt = int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVLloadidx1(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
- // result: (MOVLloadidx4 [c] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 {
- continue
- }
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVLloadidx4)
- v.AuxInt = c
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != Op386ADDLconst {
- continue
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVLloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- continue
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVLloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- return false
-}
-func rewriteValue386_Op386MOVLloadidx4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVLloadidx4)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVLloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVLloadidx4)
- v.AuxInt = int64(int32(c + 4*d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVLstore(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVLstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386MOVLstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg3(ptr, val, mem)
- return true
- }
- // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
- // cond: validOff(off)
- // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386MOVLconst {
- break
- }
- c := v_1.AuxInt
- mem := v_2
- if !(validOff(off)) {
- break
- }
- v.reset(Op386MOVLstoreconst)
- v.AuxInt = makeValAndOff(int64(int32(c)), off)
- v.Aux = sym
- v.AddArg2(ptr, mem)
- return true
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(int64(int32(c)), off)
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
}
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
@@ -5428,82 +4352,6 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool {
v.AddArg3(base, val, mem)
return true
}
- // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVLstoreidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL4 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVLstoreidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem)
- // cond: ptr.Op != OpSB
- // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- val := v_1
- mem := v_2
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVLstoreidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- break
- }
// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
// cond: y.Uses==1 && clobber(y)
// result: (ADDLmodify [off] {sym} ptr x mem)
@@ -5912,2089 +4760,328 @@ func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
v.AddArg2(ptr, mem)
return true
}
- // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ return false
+}
+func rewriteValue386_Op386MOVSDconst(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVSDconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSDconst2 (MOVSDconst1 [c]))
+ for {
+ c := v.AuxInt
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSDconst2)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, typ.UInt32)
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
for {
- x := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
+ off1 := v.AuxInt
+ sym := v.Aux
+ if v_0.Op != Op386ADDLconst {
break
}
- off := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
+ off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386MOVLstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(off)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
+ v.reset(Op386MOVSDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
return true
}
- // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
- x := v.AuxInt
+ off1 := v.AuxInt
sym1 := v.Aux
- if v_0.Op != Op386LEAL4 {
+ if v_0.Op != Op386LEAL {
break
}
- off := v_0.AuxInt
+ off2 := v_0.AuxInt
sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
+ base := v_0.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386MOVLstoreconstidx4)
- v.AuxInt = ValAndOff(x).add(off)
+ v.reset(Op386MOVSDload)
+ v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem)
- // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
- for {
- x := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- v.reset(Op386MOVLstoreconstidx1)
- v.AuxInt = x
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.AddArg2(base, mem)
return true
}
return false
}
-func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool {
+func rewriteValue386_Op386MOVSDstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
- // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
for {
- c := v.AuxInt
+ off1 := v.AuxInt
sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 {
+ if v_0.Op != Op386ADDLconst {
break
}
- idx := v_1.Args[0]
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v_1
mem := v_2
- v.reset(Op386MOVLstoreconstidx4)
- v.AuxInt = c
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.AddArg3(ptr, val, mem)
return true
}
- // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
- // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
- x := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != Op386LEAL {
break
}
- c := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v_1
mem := v_2
- v.reset(Op386MOVLstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(c)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg3(base, val, mem)
return true
}
- // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
- // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ return false
+}
+func rewriteValue386_Op386MOVSSconst(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVSSconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSSconst2 (MOVSSconst1 [c]))
for {
- x := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
+ c := v.AuxInt
+ if !(config.ctxt.Flag_shared) {
break
}
- c := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVLstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(c)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.reset(Op386MOVSSconst2)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, typ.UInt32)
+ v0.AuxInt = c
+ v.AddArg(v0)
return true
}
return false
}
-func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool {
- v_2 := v.Args[2]
+func rewriteValue386_Op386MOVSSload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem)
- // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
for {
- x := v.AuxInt
+ off1 := v.AuxInt
sym := v.Aux
if v_0.Op != Op386ADDLconst {
break
}
- c := v_0.AuxInt
+ off2 := v_0.AuxInt
ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVLstoreconstidx4)
- v.AuxInt = ValAndOff(x).add(c)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem)
- // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
- for {
- x := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
+ mem := v_1
+ if !(is32Bit(off1 + off2)) {
break
}
- c := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVLstoreconstidx4)
- v.AuxInt = ValAndOff(x).add(4 * c)
+ v.reset(Op386MOVSSload)
+ v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.AddArg2(ptr, mem)
return true
}
- return false
-}
-func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem)
- // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 2 {
- continue
- }
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVLstoreidx4)
- v.AuxInt = c
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- break
- }
- // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+ // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != Op386ADDLconst {
- continue
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVLstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != Op386LEAL {
+ break
}
- break
- }
- // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- continue
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVLstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
}
- break
+ v.reset(Op386MOVSSload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(base, mem)
+ return true
}
return false
}
-func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
- v_3 := v.Args[3]
+func rewriteValue386_Op386MOVSSstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
for {
- c := v.AuxInt
+ off1 := v.AuxInt
sym := v.Aux
if v_0.Op != Op386ADDLconst {
break
}
- d := v_0.AuxInt
+ off2 := v_0.AuxInt
ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVLstoreidx4)
- v.AuxInt = int64(int32(c + d))
+ val := v_1
+ mem := v_2
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
+ v.AddArg3(ptr, val, mem)
return true
}
- // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVLstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
+ // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != Op386LEAL {
break
}
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVLstoreidx4)
- v.AuxInt = int64(int32(c + 4*d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg3(base, val, mem)
return true
}
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLloadidx4 x [off] {sym} ptr idx mem) mem)
- // cond: y.Uses==1 && clobber(y)
- // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem)
+ return false
+}
+func rewriteValue386_Op386MOVWLSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWLSXload [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ADDLloadidx4 || y.AuxInt != off || y.Aux != sym {
+ x := v_0
+ if x.Op != Op386MOVWload {
break
}
- mem := y.Args[3]
- x := y.Args[0]
- if ptr != y.Args[1] || idx != y.Args[2] || mem != v_3 || !(y.Uses == 1 && clobber(y)) {
+ off := x.AuxInt
+ sym := x.Aux
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(Op386ADDLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg2(ptr, mem)
return true
}
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLloadidx4 x [off] {sym} ptr idx mem) mem)
- // cond: y.Uses==1 && clobber(y)
- // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem)
+ // match: (MOVWLSX (ANDLconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDLconst [c & 0x7fff] x)
for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ANDLloadidx4 || y.AuxInt != off || y.Aux != sym {
+ if v_0.Op != Op386ANDLconst {
break
}
- mem := y.Args[3]
- x := y.Args[0]
- if ptr != y.Args[1] || idx != y.Args[2] || mem != v_3 || !(y.Uses == 1 && clobber(y)) {
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
break
}
- v.reset(Op386ANDLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c & 0x7fff
+ v.AddArg(x)
return true
}
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLloadidx4 x [off] {sym} ptr idx mem) mem)
- // cond: y.Uses==1 && clobber(y)
- // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem)
+ return false
+}
+func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWLSX x)
for {
off := v.AuxInt
sym := v.Aux
ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ORLloadidx4 || y.AuxInt != off || y.Aux != sym {
+ if v_1.Op != Op386MOVWstore {
break
}
- mem := y.Args[3]
- x := y.Args[0]
- if ptr != y.Args[1] || idx != y.Args[2] || mem != v_3 || !(y.Uses == 1 && clobber(y)) {
- break
- }
- v.reset(Op386ORLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
- return true
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLloadidx4 x [off] {sym} ptr idx mem) mem)
- // cond: y.Uses==1 && clobber(y)
- // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386XORLloadidx4 || y.AuxInt != off || y.Aux != sym {
- break
- }
- mem := y.Args[3]
- x := y.Args[0]
- if ptr != y.Args[1] || idx != y.Args[2] || mem != v_3 || !(y.Uses == 1 && clobber(y)) {
- break
- }
- v.reset(Op386XORLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
- return true
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ADDL {
- break
- }
- _ = y.Args[1]
- y_0 := y.Args[0]
- y_1 := y.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
- l := y_0
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- continue
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] {
- continue
- }
- x := y_1
- if mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
- continue
- }
- v.reset(Op386ADDLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
- return true
- }
- break
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(SUBL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (SUBLmodifyidx4 [off] {sym} ptr idx x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386SUBL {
- break
- }
- x := y.Args[1]
- l := y.Args[0]
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- break
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
- break
- }
- v.reset(Op386SUBLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
- return true
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ANDL {
- break
- }
- _ = y.Args[1]
- y_0 := y.Args[0]
- y_1 := y.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
- l := y_0
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- continue
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] {
- continue
- }
- x := y_1
- if mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
- continue
- }
- v.reset(Op386ANDLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
- return true
- }
- break
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ORL {
- break
- }
- _ = y.Args[1]
- y_0 := y.Args[0]
- y_1 := y.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
- l := y_0
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- continue
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] {
- continue
- }
- x := y_1
- if mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
- continue
- }
- v.reset(Op386ORLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
- return true
- }
- break
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386XORL {
- break
- }
- _ = y.Args[1]
- y_0 := y.Args[0]
- y_1 := y.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
- l := y_0
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- continue
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] {
- continue
- }
- x := y_1
- if mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
- continue
- }
- v.reset(Op386XORLmodifyidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, x, mem)
- return true
- }
- break
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off)
- // result: (ADDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ADDLconst {
- break
- }
- c := y.AuxInt
- l := y.Args[0]
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- break
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(c, off)) {
- break
- }
- v.reset(Op386ADDLconstmodifyidx4)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off)
- // result: (ANDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ANDLconst {
- break
- }
- c := y.AuxInt
- l := y.Args[0]
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- break
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(c, off)) {
- break
- }
- v.reset(Op386ANDLconstmodifyidx4)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off)
- // result: (ORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386ORLconst {
- break
- }
- c := y.AuxInt
- l := y.Args[0]
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- break
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(c, off)) {
- break
- }
- v.reset(Op386ORLconstmodifyidx4)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem)
- // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(c,off)
- // result: (XORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- y := v_2
- if y.Op != Op386XORLconst {
- break
- }
- c := y.AuxInt
- l := y.Args[0]
- if l.Op != Op386MOVLloadidx4 || l.AuxInt != off || l.Aux != sym {
- break
- }
- mem := l.Args[2]
- if ptr != l.Args[0] || idx != l.Args[1] || mem != v_3 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(c, off)) {
- break
- }
- v.reset(Op386XORLconstmodifyidx4)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSDconst(v *Value) bool {
- b := v.Block
- config := b.Func.Config
- typ := &b.Func.Config.Types
- // match: (MOVSDconst [c])
- // cond: config.ctxt.Flag_shared
- // result: (MOVSDconst2 (MOVSDconst1 [c]))
- for {
- c := v.AuxInt
- if !(config.ctxt.Flag_shared) {
- break
- }
- v.reset(Op386MOVSDconst2)
- v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, typ.UInt32)
- v0.AuxInt = c
- v.AddArg(v0)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSDload(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVSDload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386MOVSDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg2(ptr, mem)
- return true
- }
- // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386MOVSDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg2(base, mem)
- return true
- }
- // match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVSDloadidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL8 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVSDloadidx8)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVSDload [off] {sym} (ADDL ptr idx) mem)
- // cond: ptr.Op != OpSB
- // result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- mem := v_1
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVSDloadidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- return false
-}
-func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVSDloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVSDloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVSDloadidx8)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVSDloadidx8 [int64(int32(c+8*d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVSDloadidx8)
- v.AuxInt = int64(int32(c + 8*d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSDstore(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386MOVSDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg3(ptr, val, mem)
- return true
- }
- // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386MOVSDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(base, val, mem)
- return true
- }
- // match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVSDstoreidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL8 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVSDstoreidx8)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem)
- // cond: ptr.Op != OpSB
- // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- val := v_1
- mem := v_2
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVSDstoreidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- break
- }
- return false
-}
-func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVSDstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVSDstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVSDstoreidx8)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVSDstoreidx8)
- v.AuxInt = int64(int32(c + 8*d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSSconst(v *Value) bool {
- b := v.Block
- config := b.Func.Config
- typ := &b.Func.Config.Types
- // match: (MOVSSconst [c])
- // cond: config.ctxt.Flag_shared
- // result: (MOVSSconst2 (MOVSSconst1 [c]))
- for {
- c := v.AuxInt
- if !(config.ctxt.Flag_shared) {
- break
- }
- v.reset(Op386MOVSSconst2)
- v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, typ.UInt32)
- v0.AuxInt = c
- v.AddArg(v0)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSSload(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVSSload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386MOVSSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg2(ptr, mem)
- return true
- }
- // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386MOVSSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg2(base, mem)
- return true
- }
- // match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVSSloadidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL4 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVSSloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVSSload [off] {sym} (ADDL ptr idx) mem)
- // cond: ptr.Op != OpSB
- // result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- mem := v_1
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVSSloadidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- return false
-}
-func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVSSloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVSSloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVSSloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVSSloadidx4)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVSSloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVSSloadidx4)
- v.AuxInt = int64(int32(c + 4*d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSSstore(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386MOVSSstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg3(ptr, val, mem)
- return true
- }
- // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386MOVSSstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(base, val, mem)
- return true
- }
- // match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVSSstoreidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL4 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVSSstoreidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem)
- // cond: ptr.Op != OpSB
- // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- val := v_1
- mem := v_2
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVSSstoreidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- break
- }
- return false
-}
-func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVSSstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVSSstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVSSstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVSSstoreidx4)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVSSstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVSSstoreidx4)
- v.AuxInt = int64(int32(c + 4*d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVWLSX(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVWLSXload [off] {sym} ptr mem)
- for {
- x := v_0
- if x.Op != Op386MOVWload {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- mem := x.Args[1]
- ptr := x.Args[0]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type)
- v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg2(ptr, mem)
- return true
- }
- // match: (MOVWLSX (ANDLconst [c] x))
- // cond: c & 0x8000 == 0
- // result: (ANDLconst [c & 0x7fff] x)
- for {
- if v_0.Op != Op386ANDLconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
- if !(c&0x8000 == 0) {
- break
- }
- v.reset(Op386ANDLconst)
- v.AuxInt = c & 0x7fff
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
- // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
- // result: (MOVWLSX x)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386MOVWstore {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- x := v_1.Args[1]
- ptr2 := v_1.Args[0]
- if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
- break
- }
- v.reset(Op386MOVWLSX)
- v.AddArg(x)
- return true
- }
- // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386MOVWLSXload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg2(base, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVWLZX(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVWload [off] {sym} ptr mem)
- for {
- x := v_0
- if x.Op != Op386MOVWload {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- mem := x.Args[1]
- ptr := x.Args[0]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type)
- v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg2(ptr, mem)
- return true
- }
- // match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem)
- for {
- x := v_0
- if x.Op != Op386MOVWloadidx1 {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- mem := x.Args[2]
- ptr := x.Args[0]
- idx := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type)
- v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem)
- for {
- x := v_0
- if x.Op != Op386MOVWloadidx2 {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- mem := x.Args[2]
- ptr := x.Args[0]
- idx := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(v.Pos, Op386MOVWloadidx2, v.Type)
- v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVWLZX (ANDLconst [c] x))
- // result: (ANDLconst [c & 0xffff] x)
- for {
- if v_0.Op != Op386ANDLconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(Op386ANDLconst)
- v.AuxInt = c & 0xffff
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVWload(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
- // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
- // result: (MOVWLZX x)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386MOVWstore {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- x := v_1.Args[1]
- ptr2 := v_1.Args[0]
- if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
- break
- }
- v.reset(Op386MOVWLZX)
- v.AddArg(x)
- return true
- }
- // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVWload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg2(ptr, mem)
- return true
- }
- // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg2(base, mem)
- return true
- }
- // match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVWloadidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL2 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVWloadidx2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVWload [off] {sym} (ADDL ptr idx) mem)
- // cond: ptr.Op != OpSB
- // result: (MOVWloadidx1 [off] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- mem := v_1
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVWloadidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- // match: (MOVWload [off] {sym} (SB) _)
- // cond: symIsRO(sym)
- // result: (MOVLconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != OpSB || !(symIsRO(sym)) {
- break
- }
- v.reset(Op386MOVLconst)
- v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVWloadidx1(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
- // result: (MOVWloadidx2 [c] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 {
- continue
- }
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVWloadidx2)
- v.AuxInt = c
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != Op386ADDLconst {
- continue
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVWloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- // match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- continue
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVWloadidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- break
- }
- return false
-}
-func rewriteValue386_Op386MOVWloadidx2(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem)
- // result: (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- mem := v_2
- v.reset(Op386MOVWloadidx2)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem)
- // result: (MOVWloadidx2 [int64(int32(c+2*d))] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVWloadidx2)
- v.AuxInt = int64(int32(c + 2*d))
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386MOVWstore(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
- // result: (MOVWstore [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386MOVWLSX {
- break
- }
- x := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVWstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg3(ptr, x, mem)
- return true
- }
- // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
- // result: (MOVWstore [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386MOVWLZX {
- break
- }
- x := v_1.Args[0]
- mem := v_2
- v.reset(Op386MOVWstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg3(ptr, x, mem)
- return true
- }
- // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVWstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg3(ptr, val, mem)
- return true
- }
- // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
- // cond: validOff(off)
- // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386MOVLconst {
- break
- }
- c := v_1.AuxInt
- mem := v_2
- if !(validOff(off)) {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(Op386MOVWstoreconst)
- v.AuxInt = makeValAndOff(int64(int16(c)), off)
- v.Aux = sym
- v.AddArg2(ptr, mem)
+ v.reset(Op386MOVWLSX)
+ v.AddArg(x)
return true
}
- // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -8004,718 +5091,508 @@ func rewriteValue386_Op386MOVWstore(v *Value) bool {
off2 := v_0.AuxInt
sym2 := v_0.Aux
base := v_0.Args[0]
- val := v_1
- mem := v_2
+ mem := v_1
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(base, val, mem)
- return true
- }
- // match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- // match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL2 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVWstoreidx2)
+ v.reset(Op386MOVWLSXload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(ptr, idx, val, mem)
+ v.AddArg2(base, mem)
return true
}
- // match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem)
- // cond: ptr.Op != OpSB
- // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- ptr := v_0_0
- idx := v_0_1
- val := v_1
- mem := v_2
- if !(ptr.Op != OpSB) {
- continue
- }
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
- }
- break
- }
- // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ return false
+}
+func rewriteValue386_Op386MOVWLZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
- // result: (MOVLstore [i-2] {s} p w mem)
+ // result: @x.Block (MOVWload [off] {sym} ptr mem)
for {
- i := v.AuxInt
- s := v.Aux
- p := v_0
- if v_1.Op != Op386SHRLconst || v_1.AuxInt != 16 {
- break
- }
- w := v_1.Args[0]
- x := v_2
- if x.Op != Op386MOVWstore || x.AuxInt != i-2 || x.Aux != s {
+ x := v_0
+ if x.Op != Op386MOVWload {
break
}
- mem := x.Args[2]
- if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ off := x.AuxInt
+ sym := x.Aux
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(Op386MOVLstore)
- v.AuxInt = i - 2
- v.Aux = s
- v.AddArg3(p, w, mem)
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg2(ptr, mem)
return true
}
- // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVLstore [i-2] {s} p w0 mem)
+ // match: (MOVWLZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xffff] x)
for {
- i := v.AuxInt
- s := v.Aux
- p := v_0
- if v_1.Op != Op386SHRLconst {
- break
- }
- j := v_1.AuxInt
- w := v_1.Args[0]
- x := v_2
- if x.Op != Op386MOVWstore || x.AuxInt != i-2 || x.Aux != s {
- break
- }
- mem := x.Args[2]
- if p != x.Args[0] {
- break
- }
- w0 := x.Args[1]
- if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if v_0.Op != Op386ANDLconst {
break
}
- v.reset(Op386MOVLstore)
- v.AuxInt = i - 2
- v.Aux = s
- v.AddArg3(p, w0, mem)
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c & 0xffff
+ v.AddArg(x)
return true
}
return false
}
-func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
+func rewriteValue386_Op386MOVWload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
- // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
- // cond: ValAndOff(sc).canAdd(off)
- // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
- for {
- sc := v.AuxInt
- s := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v_1
- if !(ValAndOff(sc).canAdd(off)) {
- break
- }
- v.reset(Op386MOVWstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = s
- v.AddArg2(ptr, mem)
- return true
- }
- // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
- // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
- for {
- sc := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v_1
- if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386MOVWstoreconst)
- v.AuxInt = ValAndOff(sc).add(off)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg2(ptr, mem)
- return true
- }
- // match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- x := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL1 {
- break
- }
- off := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386MOVWstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(off)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem)
- // cond: canMergeSym(sym1, sym2)
- // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWLZX x)
for {
- x := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL2 {
- break
- }
- off := v_0.AuxInt
- sym2 := v_0.Aux
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- if !(canMergeSym(sym1, sym2)) {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v_0
+ if v_1.Op != Op386MOVWstore {
break
}
- v.reset(Op386MOVWstoreconstidx2)
- v.AuxInt = ValAndOff(x).add(off)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem)
- // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
- for {
- x := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDL {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- idx := v_0.Args[1]
- ptr := v_0.Args[0]
- mem := v_1
- v.reset(Op386MOVWstoreconstidx1)
- v.AuxInt = x
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.reset(Op386MOVWLZX)
+ v.AddArg(x)
return true
}
- // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+ // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
for {
- c := v.AuxInt
- s := v.Aux
- p := v_0
- x := v_1
- if x.Op != Op386MOVWstoreconst {
- break
- }
- a := x.AuxInt
- if x.Aux != s {
+ off1 := v.AuxInt
+ sym := v.Aux
+ if v_0.Op != Op386ADDLconst {
break
}
- mem := x.Args[1]
- if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386MOVLstoreconst)
- v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
- v.Aux = s
- v.AddArg2(p, mem)
+ v.reset(Op386MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
return true
}
- // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+ // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
- a := v.AuxInt
- s := v.Aux
- p := v_0
- x := v_1
- if x.Op != Op386MOVWstoreconst {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != Op386LEAL {
break
}
- c := x.AuxInt
- if x.Aux != s {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- mem := x.Args[1]
- if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ v.reset(Op386MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
- v.reset(Op386MOVLstoreconst)
- v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
- v.Aux = s
- v.AddArg2(p, mem)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
return true
}
return false
}
-func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool {
+func rewriteValue386_Op386MOVWstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
- // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
for {
- c := v.AuxInt
+ off := v.AuxInt
sym := v.Aux
ptr := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 {
+ if v_1.Op != Op386MOVWLSX {
break
}
- idx := v_1.Args[0]
+ x := v_1.Args[0]
mem := v_2
- v.reset(Op386MOVWstoreconstidx2)
- v.AuxInt = c
+ v.reset(Op386MOVWstore)
+ v.AuxInt = off
v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.AddArg3(ptr, x, mem)
return true
}
- // match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
- // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
for {
- x := v.AuxInt
+ off := v.AuxInt
sym := v.Aux
- if v_0.Op != Op386ADDLconst {
+ ptr := v_0
+ if v_1.Op != Op386MOVWLZX {
break
}
- c := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
+ x := v_1.Args[0]
mem := v_2
- v.reset(Op386MOVWstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(c)
+ v.reset(Op386MOVWstore)
+ v.AuxInt = off
v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.AddArg3(ptr, x, mem)
return true
}
- // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
- // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
- x := v.AuxInt
+ off1 := v.AuxInt
sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
+ if v_0.Op != Op386ADDLconst {
break
}
- c := v_1.AuxInt
- idx := v_1.Args[0]
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v_1
mem := v_2
- v.reset(Op386MOVWstoreconstidx1)
- v.AuxInt = ValAndOff(x).add(c)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
- for {
- c := v.AuxInt
- s := v.Aux
- p := v_0
- i := v_1
- x := v_2
- if x.Op != Op386MOVWstoreconstidx1 {
- break
- }
- a := x.AuxInt
- if x.Aux != s {
- break
- }
- mem := x.Args[2]
- if p != x.Args[0] || i != x.Args[1] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386MOVLstoreconstidx1)
- v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
- v.Aux = s
- v.AddArg3(p, i, mem)
+ v.reset(Op386MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg3(ptr, val, mem)
return true
}
- return false
-}
-func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem)
- // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
for {
- x := v.AuxInt
+ off := v.AuxInt
sym := v.Aux
- if v_0.Op != Op386ADDLconst {
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
break
}
- c := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
+ c := v_1.AuxInt
mem := v_2
- v.reset(Op386MOVWstoreconstidx2)
- v.AuxInt = ValAndOff(x).add(c)
+ if !(validOff(off)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.AddArg2(ptr, mem)
return true
}
- // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem)
- // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
+ // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
- x := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != Op386LEAL {
break
}
- c := v_1.AuxInt
- idx := v_1.Args[0]
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v_1
mem := v_2
- v.reset(Op386MOVWstoreconstidx2)
- v.AuxInt = ValAndOff(x).add(2 * c)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg3(base, val, mem)
return true
}
- // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
- // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
- // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst [1] i) mem)
+ // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w mem)
for {
- c := v.AuxInt
+ i := v.AuxInt
s := v.Aux
p := v_0
- i := v_1
- x := v_2
- if x.Op != Op386MOVWstoreconstidx2 {
+ if v_1.Op != Op386SHRLconst || v_1.AuxInt != 16 {
break
}
- a := x.AuxInt
- if x.Aux != s {
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || x.AuxInt != i-2 || x.Aux != s {
break
}
mem := x.Args[2]
- if p != x.Args[0] || i != x.Args[1] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(Op386MOVLstoreconstidx1)
- v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+ v.reset(Op386MOVLstore)
+ v.AuxInt = i - 2
v.Aux = s
- v0 := b.NewValue0(v.Pos, Op386SHLLconst, i.Type)
- v0.AuxInt = 1
- v0.AddArg(i)
- v.AddArg3(p, v0, mem)
+ v.AddArg3(p, w, mem)
return true
}
- return false
-}
-func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem)
- // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
+ // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w0 mem)
for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386SHLLconst || v_1.AuxInt != 1 {
- continue
- }
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVWstoreidx2)
- v.AuxInt = c
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
+ i := v.AuxInt
+ s := v.Aux
+ p := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
}
- break
- }
- // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != Op386ADDLconst {
- continue
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
+ j := v_1.AuxInt
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || x.AuxInt != i-2 || x.Aux != s {
+ break
}
- break
- }
- // match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
- continue
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVWstoreidx1)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
- return true
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
}
- break
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg3(p, w0, mem)
+ return true
}
- // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w mem)
for {
i := v.AuxInt
- s := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- p := v_0
- idx := v_1
- if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 {
- continue
- }
- w := v_2.Args[0]
- x := v_3
- if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s {
- continue
- }
- mem := x.Args[3]
- x_0 := x.Args[0]
- x_1 := x.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 {
- if p != x_0 || idx != x_1 || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
- continue
- }
- v.reset(Op386MOVLstoreidx1)
- v.AuxInt = i - 2
- v.Aux = s
- v.AddArg4(p, idx, w, mem)
- return true
- }
+ s := v.Aux
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst || v_1.AuxInt != 16 {
+ break
}
- break
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || x.AuxInt != i || x.Aux != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg3(p0, w, mem)
+ return true
}
- // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w0 mem)
for {
i := v.AuxInt
s := v.Aux
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- p := v_0
- idx := v_1
- if v_2.Op != Op386SHRLconst {
- continue
- }
- j := v_2.AuxInt
- w := v_2.Args[0]
- x := v_3
- if x.Op != Op386MOVWstoreidx1 || x.AuxInt != i-2 || x.Aux != s {
- continue
- }
- mem := x.Args[3]
- x_0 := x.Args[0]
- x_1 := x.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, x_0, x_1 = _i1+1, x_1, x_0 {
- if p != x_0 || idx != x_1 {
- continue
- }
- w0 := x.Args[2]
- if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
- continue
- }
- v.reset(Op386MOVLstoreidx1)
- v.AuxInt = i - 2
- v.Aux = s
- v.AddArg4(p, idx, w0, mem)
- return true
- }
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
}
- break
+ j := v_1.AuxInt
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || x.AuxInt != i || x.Aux != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = i
+ v.Aux = s
+ v.AddArg3(p0, w0, mem)
+ return true
}
return false
}
-func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
+func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)
- // result: (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem)
+ config := b.Func.Config
+ // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
for {
- c := v.AuxInt
- sym := v.Aux
+ sc := v.AuxInt
+ s := v.Aux
if v_0.Op != Op386ADDLconst {
break
}
- d := v_0.AuxInt
+ off := v_0.AuxInt
ptr := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- v.reset(Op386MOVWstoreidx2)
- v.AuxInt = int64(int32(c + d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
+ mem := v_1
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg2(ptr, mem)
return true
}
- // match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem)
- // result: (MOVWstoreidx2 [int64(int32(c+2*d))] {sym} ptr idx val mem)
+ // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
for {
- c := v.AuxInt
- sym := v.Aux
- ptr := v_0
- if v_1.Op != Op386ADDLconst {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != Op386LEAL {
break
}
- d := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- v.reset(Op386MOVWstoreidx2)
- v.AuxInt = int64(int32(c + 2*d))
- v.Aux = sym
- v.AddArg4(ptr, idx, val, mem)
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(ptr, mem)
return true
}
- // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst [1] idx) w mem)
+ // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
for {
- i := v.AuxInt
+ c := v.AuxInt
s := v.Aux
p := v_0
- idx := v_1
- if v_2.Op != Op386SHRLconst || v_2.AuxInt != 16 {
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
break
}
- w := v_2.Args[0]
- x := v_3
- if x.Op != Op386MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s {
+ a := x.AuxInt
+ if x.Aux != s {
break
}
- mem := x.Args[3]
- if p != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
break
}
- v.reset(Op386MOVLstoreidx1)
- v.AuxInt = i - 2
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type)
- v0.AuxInt = 1
- v0.AddArg(idx)
- v.AddArg4(p, v0, w, mem)
+ v.AddArg2(p, mem)
return true
}
- // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst [1] idx) w0 mem)
+ // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
for {
- i := v.AuxInt
+ a := v.AuxInt
s := v.Aux
p := v_0
- idx := v_1
- if v_2.Op != Op386SHRLconst {
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
break
}
- j := v_2.AuxInt
- w := v_2.Args[0]
- x := v_3
- if x.Op != Op386MOVWstoreidx2 || x.AuxInt != i-2 || x.Aux != s {
+ c := x.AuxInt
+ if x.Aux != s {
break
}
- mem := x.Args[3]
- if p != x.Args[0] || idx != x.Args[1] {
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
break
}
- w0 := x.Args[2]
- if w0.Op != Op386SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() == ValAndOff(c).Off() && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p0 mem)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ p1 := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
break
}
- v.reset(Op386MOVLstoreidx1)
- v.AuxInt = i - 2
+ a := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ mem := x.Args[1]
+ p0 := x.Args[0]
+ if !(x.Uses == 1 && ValAndOff(a).Off() == ValAndOff(c).Off() && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg2(p0, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() == ValAndOff(c).Off() && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p0 mem)
+ for {
+ a := v.AuxInt
+ s := v.Aux
+ p0 := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ c := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ mem := x.Args[1]
+ p1 := x.Args[0]
+ if !(x.Uses == 1 && ValAndOff(a).Off() == ValAndOff(c).Off() && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
v.Aux = s
- v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type)
- v0.AuxInt = 1
- v0.AddArg(idx)
- v.AddArg4(p, v0, w0, mem)
+ v.AddArg2(p0, mem)
return true
}
return false
@@ -8764,32 +5641,6 @@ func rewriteValue386_Op386MULL(v *Value) bool {
}
break
}
- // match: (MULL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
- // cond: canMergeLoadClobber(v, l, x) && clobber(l)
- // result: (MULLloadidx4 x [off] {sym} ptr idx mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- l := v_1
- if l.Op != Op386MOVLloadidx4 {
- continue
- }
- off := l.AuxInt
- sym := l.Aux
- mem := l.Args[2]
- ptr := l.Args[0]
- idx := l.Args[1]
- if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
- continue
- }
- v.reset(Op386MULLloadidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(x, ptr, idx, mem)
- return true
- }
- break
- }
return false
}
func rewriteValue386_Op386MULLconst(v *Value) bool {
@@ -9201,108 +6052,30 @@ func rewriteValue386_Op386MULLconst(v *Value) bool {
v0.AddArg2(x, x)
v.AddArg(v0)
return true
- }
- // match: (MULLconst [c] (MOVLconst [d]))
- // result: (MOVLconst [int64(int32(c*d))])
- for {
- c := v.AuxInt
- if v_0.Op != Op386MOVLconst {
- break
- }
- d := v_0.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = int64(int32(c * d))
- return true
- }
- return false
-}
-func rewriteValue386_Op386MULLload(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (MULLload [off1+off2] {sym} val base mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386MULLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg3(val, base, mem)
- return true
- }
- // match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386MULLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(val, base, mem)
- return true
- }
- // match: (MULLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MULLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL4 {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- idx := v_1.Args[1]
- ptr := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ }
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [int64(int32(c*d))])
+ for {
+ c := v.AuxInt
+ if v_0.Op != Op386MOVLconst {
break
}
- v.reset(Op386MULLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, ptr, idx, mem)
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(int32(c * d))
return true
}
return false
}
-func rewriteValue386_Op386MULLloadidx4(v *Value) bool {
- v_3 := v.Args[3]
+func rewriteValue386_Op386MULLload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
- // match: (MULLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
+ // match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (MULLloadidx4 [off1+off2] {sym} val base idx mem)
+ // result: (MULLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -9312,43 +6085,19 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool {
}
off2 := v_1.AuxInt
base := v_1.Args[0]
- idx := v_2
- mem := v_3
+ mem := v_2
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386MULLloadidx4)
+ v.reset(Op386MULLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (MULLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
- // cond: is32Bit(off1+off2*4)
- // result: (MULLloadidx4 [off1+off2*4] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- base := v_1
- if v_2.Op != Op386ADDLconst {
- break
- }
- off2 := v_2.AuxInt
- idx := v_2.Args[0]
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386MULLloadidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
+ v.AddArg3(val, base, mem)
return true
}
- // match: (MULLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ // match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (MULLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
+ // result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -9359,15 +6108,14 @@ func rewriteValue386_Op386MULLloadidx4(v *Value) bool {
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
- idx := v_2
- mem := v_3
+ mem := v_2
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386MULLloadidx4)
+ v.reset(Op386MULLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, base, idx, mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -9692,32 +6440,6 @@ func rewriteValue386_Op386ORL(v *Value) bool {
}
break
}
- // match: (ORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
- // cond: canMergeLoadClobber(v, l, x) && clobber(l)
- // result: (ORLloadidx4 x [off] {sym} ptr idx mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- l := v_1
- if l.Op != Op386MOVLloadidx4 {
- continue
- }
- off := l.AuxInt
- sym := l.Aux
- mem := l.Args[2]
- ptr := l.Args[0]
- idx := l.Args[1]
- if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
- continue
- }
- v.reset(Op386ORLloadidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(x, ptr, idx, mem)
- return true
- }
- break
- }
// match: (ORL x x)
// result: x
for {
@@ -9767,6 +6489,42 @@ func rewriteValue386_Op386ORL(v *Value) bool {
}
break
}
+ // match: (ORL x0:(MOVBload [i] {s} p0 mem) s0:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, s0)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != Op386MOVBload {
+ continue
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ s0 := v_1
+ if s0.Op != Op386SHLLconst || s0.AuxInt != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload || x1.AuxInt != i || x1.Aux != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = i
+ v0.Aux = s
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
// match: (ORL o0:(ORL x0:(MOVWload [i0] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
// result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem)
@@ -9831,57 +6589,9 @@ func rewriteValue386_Op386ORL(v *Value) bool {
}
break
}
- // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
- // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, s0)
- // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x0 := v_0
- if x0.Op != Op386MOVBloadidx1 {
- continue
- }
- i0 := x0.AuxInt
- s := x0.Aux
- mem := x0.Args[2]
- x0_0 := x0.Args[0]
- x0_1 := x0.Args[1]
- for _i1 := 0; _i1 <= 1; _i1, x0_0, x0_1 = _i1+1, x0_1, x0_0 {
- p := x0_0
- idx := x0_1
- s0 := v_1
- if s0.Op != Op386SHLLconst || s0.AuxInt != 8 {
- continue
- }
- x1 := s0.Args[0]
- if x1.Op != Op386MOVBloadidx1 {
- continue
- }
- i1 := x1.AuxInt
- if x1.Aux != s {
- continue
- }
- _ = x1.Args[2]
- x1_0 := x1.Args[0]
- x1_1 := x1.Args[1]
- for _i2 := 0; _i2 <= 1; _i2, x1_0, x1_1 = _i2+1, x1_1, x1_0 {
- if p != x1_0 || idx != x1_1 || mem != x1.Args[2] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0)) {
- continue
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type)
- v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
- v0.AddArg3(p, idx, mem)
- return true
- }
- }
- }
- break
- }
- // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)))
- // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
- // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem)
+ // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p0 mem) s0:(SHLLconst [16] x1:(MOVBload [i] {s} p1 mem))) s1:(SHLLconst [24] x2:(MOVBload [i] {s} p2 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && sequentialAddresses(p0, p1, 2) && sequentialAddresses(p1, p2, 1) && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p0 mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
@@ -9893,65 +6603,46 @@ func rewriteValue386_Op386ORL(v *Value) bool {
o0_1 := o0.Args[1]
for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
x0 := o0_0
- if x0.Op != Op386MOVWloadidx1 {
+ if x0.Op != Op386MOVWload {
continue
}
- i0 := x0.AuxInt
+ i := x0.AuxInt
s := x0.Aux
- mem := x0.Args[2]
- x0_0 := x0.Args[0]
- x0_1 := x0.Args[1]
- for _i2 := 0; _i2 <= 1; _i2, x0_0, x0_1 = _i2+1, x0_1, x0_0 {
- p := x0_0
- idx := x0_1
- s0 := o0_1
- if s0.Op != Op386SHLLconst || s0.AuxInt != 16 {
- continue
- }
- x1 := s0.Args[0]
- if x1.Op != Op386MOVBloadidx1 {
- continue
- }
- i2 := x1.AuxInt
- if x1.Aux != s {
- continue
- }
- _ = x1.Args[2]
- x1_0 := x1.Args[0]
- x1_1 := x1.Args[1]
- for _i3 := 0; _i3 <= 1; _i3, x1_0, x1_1 = _i3+1, x1_1, x1_0 {
- if p != x1_0 || idx != x1_1 || mem != x1.Args[2] {
- continue
- }
- s1 := v_1
- if s1.Op != Op386SHLLconst || s1.AuxInt != 24 {
- continue
- }
- x2 := s1.Args[0]
- if x2.Op != Op386MOVBloadidx1 {
- continue
- }
- i3 := x2.AuxInt
- if x2.Aux != s {
- continue
- }
- _ = x2.Args[2]
- x2_0 := x2.Args[0]
- x2_1 := x2.Args[1]
- for _i4 := 0; _i4 <= 1; _i4, x2_0, x2_1 = _i4+1, x2_1, x2_0 {
- if p != x2_0 || idx != x2_1 || mem != x2.Args[2] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
- continue
- }
- b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type)
- v.copyOf(v0)
- v0.AuxInt = i0
- v0.Aux = s
- v0.AddArg3(p, idx, mem)
- return true
- }
- }
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ s0 := o0_1
+ if s0.Op != Op386SHLLconst || s0.AuxInt != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload || x1.AuxInt != i || x1.Aux != s {
+ continue
}
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ s1 := v_1
+ if s1.Op != Op386SHLLconst || s1.AuxInt != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != Op386MOVBload || x2.AuxInt != i || x2.Aux != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p2 := x2.Args[0]
+ if mem != x2.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && sequentialAddresses(p0, p1, 2) && sequentialAddresses(p1, p2, 1) && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = i
+ v0.Aux = s
+ v0.AddArg2(p0, mem)
+ return true
}
}
break
@@ -10048,168 +6739,15 @@ func rewriteValue386_Op386ORLconstmodify(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386ORLconstmodifyidx4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ORLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem)
- // cond: ValAndOff(valoff1).canAdd(off2)
- // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2)) {
- break
- }
- v.reset(Op386ORLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = sym
- v.AddArg3(base, idx, mem)
- return true
- }
- // match: (ORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
- // cond: ValAndOff(valoff1).canAdd(off2*4)
- // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2 * 4)) {
- break
- }
- v.reset(Op386ORLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
- v.Aux = sym
- v.AddArg3(base, idx, mem)
- return true
- }
- // match: (ORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- idx := v_1
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ORLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(base, idx, mem)
- return true
- }
- return false
-}
func rewriteValue386_Op386ORLload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
- // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (ORLload [off1+off2] {sym} val base mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386ORLload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg3(val, base, mem)
- return true
- }
- // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ORLload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(val, base, mem)
- return true
- }
- // match: (ORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (ORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL4 {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- idx := v_1.Args[1]
- ptr := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386ORLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386ORLloadidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ORLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
+ // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (ORLloadidx4 [off1+off2] {sym} val base idx mem)
+ // result: (ORLload [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
@@ -10219,43 +6757,19 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool {
}
off2 := v_1.AuxInt
base := v_1.Args[0]
- idx := v_2
- mem := v_3
+ mem := v_2
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386ORLloadidx4)
+ v.reset(Op386ORLload)
v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (ORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
- // cond: is32Bit(off1+off2*4)
- // result: (ORLloadidx4 [off1+off2*4] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- base := v_1
- if v_2.Op != Op386ADDLconst {
- break
- }
- off2 := v_2.AuxInt
- idx := v_2.Args[0]
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386ORLloadidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
+ v.AddArg3(val, base, mem)
return true
}
- // match: (ORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
+ // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -10266,15 +6780,14 @@ func rewriteValue386_Op386ORLloadidx4(v *Value) bool {
off2 := v_1.AuxInt
sym2 := v_1.Aux
base := v_1.Args[0]
- idx := v_2
- mem := v_3
+ mem := v_2
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386ORLloadidx4)
+ v.reset(Op386ORLload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, base, idx, mem)
+ v.AddArg3(val, base, mem)
return true
}
return false
@@ -10332,107 +6845,6 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386ORLmodifyidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (ORLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
- // cond: is32Bit(off1+off2)
- // result: (ORLmodifyidx4 [off1+off2] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386ORLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
- // cond: is32Bit(off1+off2*4)
- // result: (ORLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386ORLmodifyidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (ORLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386ORLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (ORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
- // cond: validValAndOff(c,off)
- // result: (ORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- if v_2.Op != Op386MOVLconst {
- break
- }
- c := v_2.AuxInt
- mem := v_3
- if !(validValAndOff(c, off)) {
- break
- }
- v.reset(Op386ORLconstmodifyidx4)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
func rewriteValue386_Op386ROLBconst(v *Value) bool {
v_0 := v.Args[0]
// match: (ROLBconst [c] (ROLBconst [d] x))
@@ -11629,29 +8041,6 @@ func rewriteValue386_Op386SUBL(v *Value) bool {
v.AddArg3(x, ptr, mem)
return true
}
- // match: (SUBL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
- // cond: canMergeLoadClobber(v, l, x) && clobber(l)
- // result: (SUBLloadidx4 x [off] {sym} ptr idx mem)
- for {
- x := v_0
- l := v_1
- if l.Op != Op386MOVLloadidx4 {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- mem := l.Args[2]
- ptr := l.Args[0]
- idx := l.Args[1]
- if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
- break
- }
- v.reset(Op386SUBLloadidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(x, ptr, idx, mem)
- return true
- }
// match: (SUBL x x)
// result: (MOVLconst [0])
for {
@@ -11759,220 +8148,39 @@ func rewriteValue386_Op386SUBLload(v *Value) bool {
v.AddArg3(val, base, mem)
return true
}
- // match: (SUBLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL4 {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- idx := v_1.Args[1]
- ptr := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386SUBLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386SUBLloadidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (SUBLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
- // cond: is32Bit(off1+off2)
- // result: (SUBLloadidx4 [off1+off2] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- idx := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386SUBLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (SUBLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
- // cond: is32Bit(off1+off2*4)
- // result: (SUBLloadidx4 [off1+off2*4] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- base := v_1
- if v_2.Op != Op386ADDLconst {
- break
- }
- off2 := v_2.AuxInt
- idx := v_2.Args[0]
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386SUBLloadidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (SUBLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- idx := v_2
- mem := v_3
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386SUBLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, base, idx, mem)
- return true
- }
return false
}
func rewriteValue386_Op386SUBLmodify(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (SUBLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
- // result: (SUBLmodify [off1+off2] {sym} base val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386SUBLmodify)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg3(base, val, mem)
- return true
- }
- // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- val := v_1
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386SUBLmodify)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(base, val, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (SUBLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
- // cond: is32Bit(off1+off2)
- // result: (SUBLmodifyidx4 [off1+off2] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386SUBLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (SUBLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
- // cond: is32Bit(off1+off2*4)
- // result: (SUBLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBLmodify [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
+ if v_0.Op != Op386ADDLconst {
break
}
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(Op386SUBLmodifyidx4)
- v.AuxInt = off1 + off2*4
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg4(base, idx, val, mem)
+ v.AddArg3(base, val, mem)
return true
}
- // match: (SUBLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
+ // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (SUBLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
+ // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
@@ -11982,38 +8190,15 @@ func rewriteValue386_Op386SUBLmodifyidx4(v *Value) bool {
off2 := v_0.AuxInt
sym2 := v_0.Aux
base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
+ val := v_1
+ mem := v_2
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
break
}
- v.reset(Op386SUBLmodifyidx4)
+ v.reset(Op386SUBLmodify)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
- // cond: validValAndOff(-c,off)
- // result: (ADDLconstmodifyidx4 [makeValAndOff(-c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- if v_2.Op != Op386MOVLconst {
- break
- }
- c := v_2.AuxInt
- mem := v_3
- if !(validValAndOff(-c, off)) {
- break
- }
- v.reset(Op386ADDLconstmodifyidx4)
- v.AuxInt = makeValAndOff(-c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
+ v.AddArg3(base, val, mem)
return true
}
return false
@@ -12300,32 +8485,6 @@ func rewriteValue386_Op386XORL(v *Value) bool {
}
break
}
- // match: (XORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem))
- // cond: canMergeLoadClobber(v, l, x) && clobber(l)
- // result: (XORLloadidx4 x [off] {sym} ptr idx mem)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- l := v_1
- if l.Op != Op386MOVLloadidx4 {
- continue
- }
- off := l.AuxInt
- sym := l.Aux
- mem := l.Args[2]
- ptr := l.Args[0]
- idx := l.Args[1]
- if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
- continue
- }
- v.reset(Op386XORLloadidx4)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg4(x, ptr, idx, mem)
- return true
- }
- break
- }
// match: (XORL x x)
// result: (MOVLconst [0])
for {
@@ -12431,81 +8590,6 @@ func rewriteValue386_Op386XORLconstmodify(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386XORLconstmodifyidx4(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (XORLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem)
- // cond: ValAndOff(valoff1).canAdd(off2)
- // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2)) {
- break
- }
- v.reset(Op386XORLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = sym
- v.AddArg3(base, idx, mem)
- return true
- }
- // match: (XORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem)
- // cond: ValAndOff(valoff1).canAdd(off2*4)
- // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2 * 4)) {
- break
- }
- v.reset(Op386XORLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2 * 4)
- v.Aux = sym
- v.AddArg3(base, idx, mem)
- return true
- }
- // match: (XORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem)
- // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem)
- for {
- valoff1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- idx := v_1
- mem := v_2
- if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386XORLconstmodifyidx4)
- v.AuxInt = ValAndOff(valoff1).add(off2)
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg3(base, idx, mem)
- return true
- }
- return false
-}
func rewriteValue386_Op386XORLload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -12557,109 +8641,6 @@ func rewriteValue386_Op386XORLload(v *Value) bool {
v.AddArg3(val, base, mem)
return true
}
- // match: (XORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL4 {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- idx := v_1.Args[1]
- ptr := v_1.Args[0]
- mem := v_2
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(Op386XORLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, ptr, idx, mem)
- return true
- }
- return false
-}
-func rewriteValue386_Op386XORLloadidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (XORLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem)
- // cond: is32Bit(off1+off2)
- // result: (XORLloadidx4 [off1+off2] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- idx := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386XORLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (XORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem)
- // cond: is32Bit(off1+off2*4)
- // result: (XORLloadidx4 [off1+off2*4] {sym} val base idx mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- val := v_0
- base := v_1
- if v_2.Op != Op386ADDLconst {
- break
- }
- off2 := v_2.AuxInt
- idx := v_2.Args[0]
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386XORLloadidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(val, base, idx, mem)
- return true
- }
- // match: (XORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (XORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- val := v_0
- if v_1.Op != Op386LEAL {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- idx := v_2
- mem := v_3
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386XORLloadidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(val, base, idx, mem)
- return true
- }
return false
}
func rewriteValue386_Op386XORLmodify(v *Value) bool {
@@ -12715,107 +8696,6 @@ func rewriteValue386_Op386XORLmodify(v *Value) bool {
}
return false
}
-func rewriteValue386_Op386XORLmodifyidx4(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
- // match: (XORLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem)
- // cond: is32Bit(off1+off2)
- // result: (XORLmodifyidx4 [off1+off2] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- if v_0.Op != Op386ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(Op386XORLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (XORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem)
- // cond: is32Bit(off1+off2*4)
- // result: (XORLmodifyidx4 [off1+off2*4] {sym} base idx val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- base := v_0
- if v_1.Op != Op386ADDLconst {
- break
- }
- off2 := v_1.AuxInt
- idx := v_1.Args[0]
- val := v_2
- mem := v_3
- if !(is32Bit(off1 + off2*4)) {
- break
- }
- v.reset(Op386XORLmodifyidx4)
- v.AuxInt = off1 + off2*4
- v.Aux = sym
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (XORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
- // result: (XORLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- if v_0.Op != Op386LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- idx := v_1
- val := v_2
- mem := v_3
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
- break
- }
- v.reset(Op386XORLmodifyidx4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg4(base, idx, val, mem)
- return true
- }
- // match: (XORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem)
- // cond: validValAndOff(c,off)
- // result: (XORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v_0
- idx := v_1
- if v_2.Op != Op386MOVLconst {
- break
- }
- c := v_2.AuxInt
- mem := v_3
- if !(validValAndOff(c, off)) {
- break
- }
- v.reset(Op386XORLconstmodifyidx4)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg3(ptr, idx, mem)
- return true
- }
- return false
-}
func rewriteValue386_OpConstNil(v *Value) bool {
// match: (ConstNil)
// result: (MOVLconst [0])
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 13873b2ac8..d6213e8741 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -3983,7 +3983,7 @@ func rewriteValuegeneric_OpConvert(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Convert (Add64 (Convert ptr mem) off) mem)
- // result: (Add64 ptr off)
+ // result: (AddPtr ptr off)
for {
if v_0.Op != OpAdd64 {
break
@@ -4001,14 +4001,14 @@ func rewriteValuegeneric_OpConvert(v *Value) bool {
if mem != v_1 {
continue
}
- v.reset(OpAdd64)
+ v.reset(OpAddPtr)
v.AddArg2(ptr, off)
return true
}
break
}
// match: (Convert (Add32 (Convert ptr mem) off) mem)
- // result: (Add32 ptr off)
+ // result: (AddPtr ptr off)
for {
if v_0.Op != OpAdd32 {
break
@@ -4026,7 +4026,7 @@ func rewriteValuegeneric_OpConvert(v *Value) bool {
if mem != v_1 {
continue
}
- v.reset(OpAdd32)
+ v.reset(OpAddPtr)
v.AddArg2(ptr, off)
return true
}
diff --git a/test/codegen/memops.go b/test/codegen/memops.go
index 9d18153a29..0df191480d 100644
--- a/test/codegen/memops.go
+++ b/test/codegen/memops.go
@@ -99,46 +99,61 @@ func compMem3(x, y *int) (int, bool) {
func idxInt8(x, y []int8, i int) {
var t int8
// amd64: `MOVBL[SZ]X\t1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*`
+ // 386: `MOVBL[SZ]X\t1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\), [A-Z]+[0-9]*`
t = x[i+1]
// amd64: `MOVB\t[A-Z]+[0-9]*, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)`
+ // 386: `MOVB\t[A-Z]+[0-9]*, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)`
y[i+1] = t
// amd64: `MOVB\t[$]77, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)`
+ // 386: `MOVB\t[$]77, 1\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*1\)`
x[i+1] = 77
}
func idxInt16(x, y []int16, i int) {
var t int16
// amd64: `MOVWL[SZ]X\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*`
+ // 386: `MOVWL[SZ]X\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*`
t = x[i+1]
// amd64: `MOVW\t[A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)`
+ // 386: `MOVW\t[A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)`
y[i+1] = t
// amd64: `MOVWL[SZ]X\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*`
+ // 386: `MOVWL[SZ]X\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*`
t = x[16*i+1]
// amd64: `MOVW\t[A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)`
+ // 386: `MOVW\t[A-Z]+[0-9]*, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)`
y[16*i+1] = t
// amd64: `MOVW\t[$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)`
+ // 386: `MOVW\t[$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\)`
x[i+1] = 77
// amd64: `MOVW\t[$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)`
+ // 386: `MOVW\t[$]77, 2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\)`
x[16*i+1] = 77
}
func idxInt32(x, y []int32, i int) {
var t int32
// amd64: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*`
+ // 386: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*`
t = x[i+1]
// amd64: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)`
+ // 386: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)`
y[i+1] = t
// amd64: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), [A-Z]+[0-9]*`
t = x[2*i+1]
// amd64: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)`
y[2*i+1] = t
// amd64: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*`
+ // 386: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), [A-Z]+[0-9]*`
t = x[16*i+1]
// amd64: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)`
+ // 386: `MOVL\t[A-Z]+[0-9]*, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)`
y[16*i+1] = t
// amd64: `MOVL\t[$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)`
+ // 386: `MOVL\t[$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)`
x[i+1] = 77
// amd64: `MOVL\t[$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)`
+ // 386: `MOVL\t[$]77, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)`
x[16*i+1] = 77
}
@@ -160,24 +175,71 @@ func idxInt64(x, y []int64, i int) {
func idxFloat32(x, y []float32, i int) {
var t float32
- // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+`
+ // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+`
+ // 386/sse2: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+`
t = x[i+1]
- // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)`
+ // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)`
+ // 386/sse2: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)`
y[i+1] = t
- // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+`
+ // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+`
+ // 386/sse2: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+`
t = x[16*i+1]
- // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)`
+ // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)`
+ // 386/sse2: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)`
y[16*i+1] = t
}
func idxFloat64(x, y []float64, i int) {
var t float64
- // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+`
+ // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+`
+ // 386/sse2: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+`
t = x[i+1]
- // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)`
+ // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)`
+ // 386/sse2: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)`
y[i+1] = t
- // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+`
+ // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+`
+ // 386/sse2: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+`
t = x[16*i+1]
- // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)`
+ // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)`
+ // 386/sse2: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)`
y[16*i+1] = t
}
+
+func idxLoadPlusOp(x []int32, i int) int32 {
+ s := x[0]
+ // 386: `ADDL\t4\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
+ s += x[i+1]
+ // 386: `SUBL\t8\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
+ s -= x[i+2]
+ // 386: `IMULL\t12\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
+ s *= x[i+3]
+ // 386: `ANDL\t16\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
+ s &= x[i+4]
+ // 386: `ORL\t20\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
+ s |= x[i+5]
+ // 386: `XORL\t24\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
+ s ^= x[i+6]
+ return s
+}
+
+func idxStorePlusOp(x []int32, i int, v int32) {
+ // 386: `ADDL\t[A-Z]+, 4\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+1] += v
+ // 386: `SUBL\t[A-Z]+, 8\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+2] -= v
+ // 386: `ANDL\t[A-Z]+, 12\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+3] &= v
+ // 386: `ORL\t[A-Z]+, 16\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+4] |= v
+ // 386: `XORL\t[A-Z]+, 20\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+5] ^= v
+
+ // 386: `ADDL\t[$]77, 24\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+6] += 77
+ // 386: `ANDL\t[$]77, 28\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+7] &= 77
+ // 386: `ORL\t[$]77, 32\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+8] |= 77
+ // 386: `XORL\t[$]77, 36\([A-Z]+\)\([A-Z]+\*4\)`
+ x[i+9] ^= 77
+}
--
cgit v1.2.3-54-g00ecf
From f5558bb2f580ed40374a98e8db6fd58ae79f6e1d Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Fri, 27 Mar 2020 10:09:26 -0700
Subject: os/exec: add temporary debugging code for #25628
On linux-386 builders, run the TestExtraFiles subprocess under strace,
in hopes of finding out where the unexpected descriptor is coming from.
For #25628
Change-Id: I9a62d6a5192a076525a616ccc71de74bbe7ebd58
Reviewed-on: https://go-review.googlesource.com/c/go/+/225799
Run-TryBot: Ian Lance Taylor
TryBot-Result: Gobot Gobot
Reviewed-by: Bryan C. Mills
---
src/os/exec/exec_test.go | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go
index 91dd0a41ac..9d6069093e 100644
--- a/src/os/exec/exec_test.go
+++ b/src/os/exec/exec_test.go
@@ -79,6 +79,13 @@ func helperCommandContext(t *testing.T, ctx context.Context, s ...string) (cmd *
} else {
cmd = exec.Command(os.Args[0], cs...)
}
+
+ // Temporary code to try to resolve #25628.
+ // TODO(iant): Remove this when we no longer need it.
+ if runtime.GOARCH == "386" && runtime.GOOS == "linux" && testenv.Builder() != "" && len(s) == 1 && s[0] == "read3" && ctx == nil {
+ cmd = exec.Command("/usr/bin/strace", append([]string{"-f", os.Args[0]}, cs...)...)
+ }
+
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
return cmd
}
--
cgit v1.2.3-54-g00ecf
From 78a45d8b4592dbd38057a2e9af83c9cf9d62ddc9 Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Thu, 26 Mar 2020 09:13:11 -0700
Subject: runtime: lock mtxpoll in AIX netpollBreak
netpollBreak calls netpollwakeup, and netpollwakeup expects the mtxpoll
lock to be held, so that it has exclusive access to pendingUpdates.
Not acquiring the lock was a mistake in CL 171824. Fortunately it
rarely matters in practice.
Change-Id: I32962ec2575c846ef3d6a91a4d821b2ff02d983c
Reviewed-on: https://go-review.googlesource.com/c/go/+/225618
Reviewed-by: Michael Knyszek
---
src/runtime/netpoll_aix.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go
index c936fbb70f..61becc247e 100644
--- a/src/runtime/netpoll_aix.go
+++ b/src/runtime/netpoll_aix.go
@@ -130,7 +130,9 @@ func netpollarm(pd *pollDesc, mode int) {
// netpollBreak interrupts a poll.
func netpollBreak() {
+ lock(&mtxpoll)
netpollwakeup()
+ unlock(&mtxpoll)
}
// netpoll checks for ready network connections.
--
cgit v1.2.3-54-g00ecf
From 33357270f1e0673641c9eb28498c9c6e2b9bac72 Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Wed, 25 Dec 2019 03:33:14 +0000
Subject: runtime: refine netpollunblock by removing unreachable 'if' condition
Change-Id: I58ac10013cadd78618124cb7ff134384d158ea4f
GitHub-Last-Rev: 2dfff0d3d3d18ecb196d5357cdfec196424d9e3b
GitHub-Pull-Request: golang/go#36276
Reviewed-on: https://go-review.googlesource.com/c/go/+/212557
Run-TryBot: Ian Lance Taylor
TryBot-Result: Gobot Gobot
Reviewed-by: Ian Lance Taylor
---
src/runtime/netpoll.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go
index 3852598b7e..918c361c2e 100644
--- a/src/runtime/netpoll.go
+++ b/src/runtime/netpoll.go
@@ -447,7 +447,7 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
new = pdReady
}
if atomic.Casuintptr(gpp, old, new) {
- if old == pdReady || old == pdWait {
+ if old == pdWait {
old = 0
}
return (*g)(unsafe.Pointer(old))
--
cgit v1.2.3-54-g00ecf
From 9ceb1e5f5caca5666f9db50864c45ca1f88da1df Mon Sep 17 00:00:00 2001
From: Michael Matloob
Date: Thu, 27 Feb 2020 17:14:07 -0500
Subject: cmd/go: avoid needing to manipulate ImportStack when constructing
error
Simplify the printing of PackageErrors by pushing and popping packages
from the import stack when creating the error, rather than when printing
the error. In some cases, we don't have the same amount of information
to recreate the exact error, so we'll print the name of the package
the error is for, even when it's redundant. In the case of import cycle
errors, this change results in the addition of the position information
of the error.
This change supersedes CLs 220718 and 217106. It introduces a simpler
way to format errors.
Fixes #36173
Change-Id: Ie27011eb71f82e165ed4f9567bba6890a3849fc1
Reviewed-on: https://go-review.googlesource.com/c/go/+/224660
Run-TryBot: Michael Matloob
TryBot-Result: Gobot Gobot
Reviewed-by: Bryan C. Mills
---
src/cmd/go/go_test.go | 2 +-
src/cmd/go/internal/load/pkg.go | 121 ++++++++++++---------
src/cmd/go/internal/load/test.go | 1 -
src/cmd/go/testdata/script/mod_empty_err.txt | 2 +-
.../go/testdata/script/test_import_error_stack.txt | 3 +
src/cmd/go/testdata/script/vet_internal.txt | 14 +--
6 files changed, 81 insertions(+), 62 deletions(-)
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index 39e387b9e4..d446e457b5 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -2662,7 +2662,7 @@ func TestBadCommandLines(t *testing.T) {
tg.tempFile("src/-x/x.go", "package x\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "--", "-x")
- tg.grepStderr("invalid input directory name \"-x\"", "did not reject -x directory")
+ tg.grepStderr("invalid import path \"-x\"", "did not reject -x import path")
tg.tempFile("src/-x/y/y.go", "package y\n")
tg.setenv("GOPATH", tg.path("."))
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index 21dcee1315..6aea54340d 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -318,16 +318,16 @@ func (p *Package) copyBuild(pp *build.Package) {
// A PackageError describes an error loading information about a package.
type PackageError struct {
- ImportStack []string // shortest path from package named on command line to this one
- Pos string // position of error
- Err error // the error itself
- IsImportCycle bool // the error is an import cycle
- Hard bool // whether the error is soft or hard; soft errors are ignored in some places
+ ImportStack []string // shortest path from package named on command line to this one
+ Pos string // position of error
+ Err error // the error itself
+ IsImportCycle bool // the error is an import cycle
+ Hard bool // whether the error is soft or hard; soft errors are ignored in some places
+ alwaysPrintStack bool // whether to always print the ImportStack
}
func (p *PackageError) Error() string {
- // Import cycles deserve special treatment.
- if p.Pos != "" && !p.IsImportCycle {
+ if p.Pos != "" && (len(p.ImportStack) == 0 || !p.alwaysPrintStack) {
// Omit import stack. The full path to the file where the error
// is the most important thing.
return p.Pos + ": " + p.Err.Error()
@@ -339,15 +339,14 @@ func (p *PackageError) Error() string {
// last path on the stack, we don't omit the path. An error like
// "package A imports B: error loading C caused by B" would not be clearer
// if "imports B" were omitted.
- stack := p.ImportStack
- var ierr ImportPathError
- if len(stack) > 0 && errors.As(p.Err, &ierr) && ierr.ImportPath() == stack[len(stack)-1] {
- stack = stack[:len(stack)-1]
- }
- if len(stack) == 0 {
+ if len(p.ImportStack) == 0 {
return p.Err.Error()
}
- return "package " + strings.Join(stack, "\n\timports ") + ": " + p.Err.Error()
+ var optpos string
+ if p.Pos != "" {
+ optpos = "\n\t" + p.Pos
+ }
+ return "package " + strings.Join(p.ImportStack, "\n\timports ") + optpos + ": " + p.Err.Error()
}
func (p *PackageError) Unwrap() error { return p.Err }
@@ -549,9 +548,6 @@ func loadImport(pre *preload, path, srcDir string, parent *Package, stk *ImportS
panic("LoadImport called with empty package path")
}
- stk.Push(path)
- defer stk.Pop()
-
var parentPath, parentRoot string
parentIsStd := false
if parent != nil {
@@ -564,6 +560,11 @@ func loadImport(pre *preload, path, srcDir string, parent *Package, stk *ImportS
pre.preloadImports(bp.Imports, bp)
}
if bp == nil {
+ if importErr, ok := err.(ImportPathError); !ok || importErr.ImportPath() != path {
+ // Only add path to the error's import stack if it's not already present on the error.
+ stk.Push(path)
+ defer stk.Pop()
+ }
return &Package{
PackagePublic: PackagePublic{
ImportPath: path,
@@ -578,7 +579,9 @@ func loadImport(pre *preload, path, srcDir string, parent *Package, stk *ImportS
importPath := bp.ImportPath
p := packageCache[importPath]
if p != nil {
+ stk.Push(path)
p = reusePackage(p, stk)
+ stk.Pop()
} else {
p = new(Package)
p.Internal.Local = build.IsLocalImport(path)
@@ -588,8 +591,11 @@ func loadImport(pre *preload, path, srcDir string, parent *Package, stk *ImportS
// Load package.
// loadPackageData may return bp != nil even if an error occurs,
// in order to return partial information.
- p.load(stk, bp, err)
- if p.Error != nil && p.Error.Pos == "" {
+ p.load(path, stk, bp, err)
+ // Add position information unless this is a NoGoError or an ImportCycle error.
+ // Import cycles deserve special treatment.
+ var g *build.NoGoError
+ if p.Error != nil && p.Error.Pos == "" && !errors.As(err, &g) && !p.Error.IsImportCycle {
p = setErrorPos(p, importPos)
}
@@ -608,7 +614,7 @@ func loadImport(pre *preload, path, srcDir string, parent *Package, stk *ImportS
return setErrorPos(perr, importPos)
}
if mode&ResolveImport != 0 {
- if perr := disallowVendor(srcDir, path, p, stk); perr != p {
+ if perr := disallowVendor(srcDir, path, parentPath, p, stk); perr != p {
return setErrorPos(perr, importPos)
}
}
@@ -1246,7 +1252,7 @@ func disallowInternal(srcDir string, importer *Package, importerPath string, p *
// as if it were generated into the testing directory tree
// (it's actually in a temporary directory outside any Go tree).
// This cleans up a former kludge in passing functionality to the testing package.
- if strings.HasPrefix(p.ImportPath, "testing/internal") && len(*stk) >= 2 && (*stk)[len(*stk)-2] == "testmain" {
+ if str.HasPathPrefix(p.ImportPath, "testing/internal") && importerPath == "testmain" {
return p
}
@@ -1262,11 +1268,10 @@ func disallowInternal(srcDir string, importer *Package, importerPath string, p *
return p
}
- // The stack includes p.ImportPath.
- // If that's the only thing on the stack, we started
+ // importerPath is empty: we started
// with a name given on the command line, not an
// import. Anything listed on the command line is fine.
- if len(*stk) == 1 {
+ if importerPath == "" {
return p
}
@@ -1315,8 +1320,9 @@ func disallowInternal(srcDir string, importer *Package, importerPath string, p *
// Internal is present, and srcDir is outside parent's tree. Not allowed.
perr := *p
perr.Error = &PackageError{
- ImportStack: stk.Copy(),
- Err: ImportErrorf(p.ImportPath, "use of internal package "+p.ImportPath+" not allowed"),
+ alwaysPrintStack: true,
+ ImportStack: stk.Copy(),
+ Err: ImportErrorf(p.ImportPath, "use of internal package "+p.ImportPath+" not allowed"),
}
perr.Incomplete = true
return &perr
@@ -1344,16 +1350,15 @@ func findInternal(path string) (index int, ok bool) {
// disallowVendor checks that srcDir is allowed to import p as path.
// If the import is allowed, disallowVendor returns the original package p.
// If not, it returns a new package containing just an appropriate error.
-func disallowVendor(srcDir string, path string, p *Package, stk *ImportStack) *Package {
- // The stack includes p.ImportPath.
- // If that's the only thing on the stack, we started
+func disallowVendor(srcDir string, path string, importerPath string, p *Package, stk *ImportStack) *Package {
+ // If the importerPath is empty, we started
// with a name given on the command line, not an
// import. Anything listed on the command line is fine.
- if len(*stk) == 1 {
+ if importerPath == "" {
return p
}
- if perr := disallowVendorVisibility(srcDir, p, stk); perr != p {
+ if perr := disallowVendorVisibility(srcDir, p, importerPath, stk); perr != p {
return perr
}
@@ -1376,12 +1381,12 @@ func disallowVendor(srcDir string, path string, p *Package, stk *ImportStack) *P
// is not subject to the rules, only subdirectories of vendor.
// This allows people to have packages and commands named vendor,
// for maximal compatibility with existing source trees.
-func disallowVendorVisibility(srcDir string, p *Package, stk *ImportStack) *Package {
- // The stack includes p.ImportPath.
- // If that's the only thing on the stack, we started
+func disallowVendorVisibility(srcDir string, p *Package, importerPath string, stk *ImportStack) *Package {
+ // The stack does not include p.ImportPath.
+ // If there's nothing on the stack, we started
// with a name given on the command line, not an
// import. Anything listed on the command line is fine.
- if len(*stk) == 1 {
+ if importerPath == "" {
return p
}
@@ -1525,7 +1530,8 @@ func (p *Package) DefaultExecName() string {
// load populates p using information from bp, err, which should
// be the result of calling build.Context.Import.
-func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
+// stk contains the import stack, not including path itself.
+func (p *Package) load(path string, stk *ImportStack, bp *build.Package, err error) {
p.copyBuild(bp)
// The localPrefix is the path we interpret ./ imports relative to.
@@ -1548,7 +1554,16 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
if err != nil {
p.Incomplete = true
+ // Report path in error stack unless err is an ImportPathError with path already set.
+ pushed := false
+ if e, ok := err.(ImportPathError); !ok || e.ImportPath() != path {
+ stk.Push(path)
+ pushed = true // Remember to pop after setError.
+ }
setError(base.ExpandScanner(p.rewordError(err)))
+ if pushed {
+ stk.Pop()
+ }
if _, isScanErr := err.(scanner.ErrorList); !isScanErr {
return
}
@@ -1675,6 +1690,23 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
}
}
+ // Check for case-insensitive collisions of import paths.
+ fold := str.ToFold(p.ImportPath)
+ if other := foldPath[fold]; other == "" {
+ foldPath[fold] = p.ImportPath
+ } else if other != p.ImportPath {
+ setError(ImportErrorf(p.ImportPath, "case-insensitive import collision: %q and %q", p.ImportPath, other))
+ return
+ }
+
+ if !SafeArg(p.ImportPath) {
+ setError(ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath))
+ return
+ }
+
+ stk.Push(path)
+ defer stk.Pop()
+
// Check for case-insensitive collision of input files.
// To avoid problems on case-insensitive files, we reject any package
// where two different input files have equal names under a case-insensitive
@@ -1703,10 +1735,6 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
setError(fmt.Errorf("invalid input directory name %q", name))
return
}
- if !SafeArg(p.ImportPath) {
- setError(ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath))
- return
- }
// Build list of imported packages and full dependency list.
imports := make([]*Package, 0, len(p.Imports))
@@ -1770,15 +1798,6 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
return
}
- // Check for case-insensitive collisions of import paths.
- fold := str.ToFold(p.ImportPath)
- if other := foldPath[fold]; other == "" {
- foldPath[fold] = p.ImportPath
- } else if other != p.ImportPath {
- setError(ImportErrorf(p.ImportPath, "case-insensitive import collision: %q and %q", p.ImportPath, other))
- return
- }
-
if cfg.ModulesEnabled && p.Error == nil {
mainPath := p.ImportPath
if p.Internal.CmdlineFiles {
@@ -2266,9 +2285,7 @@ func GoFilesPackage(gofiles []string) *Package {
pkg := new(Package)
pkg.Internal.Local = true
pkg.Internal.CmdlineFiles = true
- stk.Push("main")
- pkg.load(&stk, bp, err)
- stk.Pop()
+ pkg.load("command-line-arguments", &stk, bp, err)
pkg.Internal.LocalPrefix = dirToImportPath(dir)
pkg.ImportPath = "command-line-arguments"
pkg.Target = ""
diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go
index 866e0e567f..6465f46f4e 100644
--- a/src/cmd/go/internal/load/test.go
+++ b/src/cmd/go/internal/load/test.go
@@ -56,7 +56,6 @@ func TestPackagesFor(p *Package, cover *TestCover) (pmain, ptest, pxtest *Packag
}
if len(p1.DepsErrors) > 0 {
perr := p1.DepsErrors[0]
- perr.Pos = "" // show full import stack
err = perr
break
}
diff --git a/src/cmd/go/testdata/script/mod_empty_err.txt b/src/cmd/go/testdata/script/mod_empty_err.txt
index b309f634dd..982e6b2e51 100644
--- a/src/cmd/go/testdata/script/mod_empty_err.txt
+++ b/src/cmd/go/testdata/script/mod_empty_err.txt
@@ -10,7 +10,7 @@ go list -e -f {{.Error}} ./empty
stdout 'no Go files in \$WORK[/\\]empty'
go list -e -f {{.Error}} ./exclude
-stdout 'package example.com/m/exclude: build constraints exclude all Go files in \$WORK[/\\]exclude'
+stdout 'build constraints exclude all Go files in \$WORK[/\\]exclude'
go list -e -f {{.Error}} ./missing
stdout 'stat '$WORK'[/\\]missing: directory not found'
diff --git a/src/cmd/go/testdata/script/test_import_error_stack.txt b/src/cmd/go/testdata/script/test_import_error_stack.txt
index 3b796053f7..c66c1213a4 100644
--- a/src/cmd/go/testdata/script/test_import_error_stack.txt
+++ b/src/cmd/go/testdata/script/test_import_error_stack.txt
@@ -1,6 +1,9 @@
! go test testdep/p1
stderr 'package testdep/p1 \(test\)\n\timports testdep/p2\n\timports testdep/p3: build constraints exclude all Go files ' # check for full import stack
+! go vet testdep/p1
+stderr 'package testdep/p1 \(test\)\n\timports testdep/p2\n\timports testdep/p3: build constraints exclude all Go files ' # check for full import stack
+
-- testdep/p1/p1.go --
package p1
-- testdep/p1/p1_test.go --
diff --git a/src/cmd/go/testdata/script/vet_internal.txt b/src/cmd/go/testdata/script/vet_internal.txt
index 46e1ac7398..85f709302c 100644
--- a/src/cmd/go/testdata/script/vet_internal.txt
+++ b/src/cmd/go/testdata/script/vet_internal.txt
@@ -3,28 +3,28 @@ env GO111MODULE=off
# Issue 36173. Verify that "go vet" prints line numbers on load errors.
! go vet a/a.go
-stderr '^a[/\\]a.go:5:3: use of internal package'
+stderr '^package command-line-arguments\n\ta[/\\]a.go:5:3: use of internal package'
! go vet a/a_test.go
-stderr '^package command-line-arguments \(test\): use of internal package' # BUG
+stderr '^package command-line-arguments \(test\)\n\ta[/\\]a_test.go:4:3: use of internal package'
! go vet a
-stderr '^a[/\\]a.go:5:3: use of internal package'
+stderr '^package a\n\ta[/\\]a.go:5:3: use of internal package'
go vet b/b.go
! stderr 'use of internal package'
! go vet b/b_test.go
-stderr '^package command-line-arguments \(test\): use of internal package' # BUG
+stderr '^package command-line-arguments \(test\)\n\tb[/\\]b_test.go:4:3: use of internal package'
! go vet depends-on-a/depends-on-a.go
-stderr '^a[/\\]a.go:5:3: use of internal package'
+stderr '^package command-line-arguments\n\timports a\n\ta[/\\]a.go:5:3: use of internal package'
! go vet depends-on-a/depends-on-a_test.go
-stderr '^package command-line-arguments \(test\)\n\timports a: use of internal package a/x/internal/y not allowed$' # BUG
+stderr '^package command-line-arguments \(test\)\n\timports a\n\ta[/\\]a.go:5:3: use of internal package a/x/internal/y not allowed'
! go vet depends-on-a
-stderr '^a[/\\]a.go:5:3: use of internal package'
+stderr '^package depends-on-a\n\timports a\n\ta[/\\]a.go:5:3: use of internal package'
-- a/a.go --
// A package with bad imports in both src and test
--
cgit v1.2.3-54-g00ecf
From 9131f08a23bd5923d135df15da30b322748ffa12 Mon Sep 17 00:00:00 2001
From: Bradford Lamson-Scribner
Date: Tue, 10 Mar 2020 21:26:42 -0600
Subject: cmd/compile: add dark mode functionality to CFGs in the ssa.html
output
Add dark mode to the CFGs in the ssa.html output by targeting individual
parts of each SVG and applying dark-mode styles to the stroke and fill.
Fixes #37767
Change-Id: Ic867e161c6837c26d9d735ea02bc94fdb56102f6
Reviewed-on: https://go-review.googlesource.com/c/go/+/222877
Reviewed-by: Josh Bleecher Snyder
Run-TryBot: Josh Bleecher Snyder
TryBot-Result: Gobot Gobot
---
src/cmd/compile/internal/ssa/html.go | 37 +++++++++++++++++++++++++++++++++---
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index 54fa54a477..1eed224934 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -263,6 +263,14 @@ body.darkmode table, th {
border: 1px solid gray;
}
+body.darkmode text {
+ fill: white;
+}
+
+body.darkmode svg polygon:first-child {
+ fill: rgb(21, 21, 21);
+}
+
.highlight-aquamarine { background-color: aquamarine; color: black; }
.highlight-coral { background-color: coral; color: black; }
.highlight-lightpink { background-color: lightpink; color: black; }
@@ -304,7 +312,7 @@ body.darkmode table, th {
color: gray;
}
-.outline-blue { outline: blue solid 2px; }
+.outline-blue { outline: #2893ff solid 2px; }
.outline-red { outline: red solid 2px; }
.outline-blueviolet { outline: blueviolet solid 2px; }
.outline-darkolivegreen { outline: darkolivegreen solid 2px; }
@@ -316,7 +324,7 @@ body.darkmode table, th {
.outline-maroon { outline: maroon solid 2px; }
.outline-black { outline: black solid 2px; }
-ellipse.outline-blue { stroke-width: 2px; stroke: blue; }
+ellipse.outline-blue { stroke-width: 2px; stroke: #2893ff; }
ellipse.outline-red { stroke-width: 2px; stroke: red; }
ellipse.outline-blueviolet { stroke-width: 2px; stroke: blueviolet; }
ellipse.outline-darkolivegreen { stroke-width: 2px; stroke: darkolivegreen; }
@@ -642,12 +650,35 @@ function makeDraggable(event) {
function toggleDarkMode() {
document.body.classList.toggle('darkmode');
+ // Collect all of the "collapsed" elements and apply dark mode on each collapsed column
const collapsedEls = document.getElementsByClassName('collapsed');
const len = collapsedEls.length;
for (let i = 0; i < len; i++) {
collapsedEls[i].classList.toggle('darkmode');
}
+
+ // Collect and spread the appropriate elements from all of the svgs on the page into one array
+ const svgParts = [
+ ...document.querySelectorAll('path'),
+ ...document.querySelectorAll('ellipse'),
+ ...document.querySelectorAll('polygon'),
+ ];
+
+ // Iterate over the svgParts specifically looking for white and black fill/stroke to be toggled.
+ // The verbose conditional is intentional here so that we do not mutate any svg path, ellipse, or polygon that is of any color other than white or black.
+ svgParts.forEach(el => {
+ if (el.attributes.stroke.value === 'white') {
+ el.attributes.stroke.value = 'black';
+ } else if (el.attributes.stroke.value === 'black') {
+ el.attributes.stroke.value = 'white';
+ }
+ if (el.attributes.fill.value === 'white') {
+ el.attributes.fill.value = 'black';
+ } else if (el.attributes.fill.value === 'black') {
+ el.attributes.fill.value = 'white';
+ }
+ });
}
@@ -1016,7 +1047,7 @@ func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Func) {
arrow = "dotvee"
layoutDrawn[s.b.ID] = true
} else if isBackEdge(b.ID, s.b.ID) {
- color = "blue"
+ color = "#2893ff"
}
fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s.b, i, style, color, arrow)
}
--
cgit v1.2.3-54-g00ecf
From ef220dc53ed204386b30879ff1882b70a7fd602b Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Sat, 28 Mar 2020 00:21:57 +0000
Subject: Revert "runtime: lock mtxpoll in AIX netpollBreak"
This reverts CL 225618.
This is causing TestNetpollBreak to fail on AIX more often than not.
Change-Id: Ia3c24041ead4b320202f7f5b17a6b286f639a689
Reviewed-on: https://go-review.googlesource.com/c/go/+/226198
Run-TryBot: Ian Lance Taylor
Reviewed-by: Bryan C. Mills
TryBot-Result: Gobot Gobot
---
src/runtime/netpoll_aix.go | 2 --
1 file changed, 2 deletions(-)
diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go
index 61becc247e..c936fbb70f 100644
--- a/src/runtime/netpoll_aix.go
+++ b/src/runtime/netpoll_aix.go
@@ -130,9 +130,7 @@ func netpollarm(pd *pollDesc, mode int) {
// netpollBreak interrupts a poll.
func netpollBreak() {
- lock(&mtxpoll)
netpollwakeup()
- unlock(&mtxpoll)
}
// netpoll checks for ready network connections.
--
cgit v1.2.3-54-g00ecf
From d99fe1f40dfacfdebee22c13ed4471fd50f2cc1a Mon Sep 17 00:00:00 2001
From: PetarDambovaliev
Date: Sat, 28 Mar 2020 08:00:17 +0000
Subject: time: remove some unnecessary/duplicated global slices
Removes two variables:
- days, which is unused; equivalent data is already provided by longDayNames
- months, in favour of using longMonthNames
Fixes #36359
Change-Id: I51b6b7408db9359c658462ba73e59ed432f655a6
GitHub-Last-Rev: 778d3ea157d363fcb5bced6d318381b44a1cac50
GitHub-Pull-Request: golang/go#36372
Reviewed-on: https://go-review.googlesource.com/c/go/+/213177
Reviewed-by: Emmanuel Odeke
Run-TryBot: Emmanuel Odeke
TryBot-Result: Gobot Gobot
---
src/time/time.go | 29 ++---------------------------
1 file changed, 2 insertions(+), 27 deletions(-)
diff --git a/src/time/time.go b/src/time/time.go
index 3f632dbc3e..3d242f2541 100644
--- a/src/time/time.go
+++ b/src/time/time.go
@@ -287,25 +287,10 @@ const (
December
)
-var months = [...]string{
- "January",
- "February",
- "March",
- "April",
- "May",
- "June",
- "July",
- "August",
- "September",
- "October",
- "November",
- "December",
-}
-
// String returns the English name of the month ("January", "February", ...).
func (m Month) String() string {
if January <= m && m <= December {
- return months[m-1]
+ return longMonthNames[m-1]
}
buf := make([]byte, 20)
n := fmtInt(buf, uint64(m))
@@ -325,20 +310,10 @@ const (
Saturday
)
-var days = [...]string{
- "Sunday",
- "Monday",
- "Tuesday",
- "Wednesday",
- "Thursday",
- "Friday",
- "Saturday",
-}
-
// String returns the English name of the day ("Sunday", "Monday", ...).
func (d Weekday) String() string {
if Sunday <= d && d <= Saturday {
- return days[d]
+ return longDayNames[d]
}
buf := make([]byte, 20)
n := fmtInt(buf, uint64(d))
--
cgit v1.2.3-54-g00ecf
From 45f99d85e0d22a4414ebbdc41de843d88064f374 Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Fri, 27 Mar 2020 17:37:37 -0700
Subject: runtime: avoid racing on pendingUpdates in AIX netpollBreak
Instead of calling netpollwakeup, just do the write in netpollBreak.
Use the same signaling we now use in other netpollBreak instances.
Change-Id: I53a65c22862ecc8484aee91d0e1ffb21a9e62d8c
Reviewed-on: https://go-review.googlesource.com/c/go/+/226199
Run-TryBot: Ian Lance Taylor
TryBot-Result: Gobot Gobot
Reviewed-by: Bryan C. Mills
---
src/runtime/netpoll_aix.go | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go
index c936fbb70f..3c1f70874d 100644
--- a/src/runtime/netpoll_aix.go
+++ b/src/runtime/netpoll_aix.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// This is based on the former libgo/runtime/netpoll_select.c implementation
// except that it uses poll instead of select and is written in Go.
@@ -41,6 +44,8 @@ var (
rdwake int32
wrwake int32
pendingUpdates int32
+
+ netpollWakeSig uintptr // used to avoid duplicate calls of netpollBreak
)
func netpollinit() {
@@ -130,7 +135,10 @@ func netpollarm(pd *pollDesc, mode int) {
// netpollBreak interrupts a poll.
func netpollBreak() {
- netpollwakeup()
+ if atomic.Casuintptr(&netpollWakeSig, 0, 1) {
+ b := [1]byte{0}
+ write(uintptr(wrwake), unsafe.Pointer(&b[0]), 1)
+ }
}
// netpoll checks for ready network connections.
@@ -184,6 +192,7 @@ retry:
var b [1]byte
for read(rdwake, unsafe.Pointer(&b[0]), 1) == 1 {
}
+ atomic.Storeuintptr(&netpollWakeSig, 0)
}
// Still look at the other fds even if the mode may have
// changed, as netpollBreak might have been called.
--
cgit v1.2.3-54-g00ecf
From 2ba00e47545406b3dd11436e3f1acf841d4932c6 Mon Sep 17 00:00:00 2001
From: Giovanni Bajo
Date: Sat, 21 Mar 2020 13:22:14 +0100
Subject: doc: decrease prominence of GOROOT_BOOTSTRAP
Go build scripts on UNIX (make.bash, all.bash) have not required
GOROOT_BOOTSTRAP since August 2017 (CL 57753). Windows build scripts
have followed suit since CL 96455. Most people building Go will have
a Go toolchain in their PATH and will not need to specify a different
toolchain.
This CL removes the GOROOT_BOOTSTRAP mention from the contribution guide
(it was there for Windows only, but it's not required anymore). The guide
is meant to be light and clear for beginners and is not supposed to be
a reference, so there's no need to keep mentioning GOROOT_BOOTSTRAP.
Also update install-source.html to reflect the current status quo,
where using the PATH is probably the first and most used default, and
GOROOT_BOOTSTRAP is just an option.
Change-Id: Iab453e61b0c749c256aaaf81ea9b2ae58822cb89
Reviewed-on: https://go-review.googlesource.com/c/go/+/224717
Run-TryBot: Giovanni Bajo
TryBot-Result: Gobot Gobot
Reviewed-by: Rob Pike
---
doc/contribute.html | 4 +---
doc/install-source.html | 22 ++++++++--------------
2 files changed, 9 insertions(+), 17 deletions(-)
diff --git a/doc/contribute.html b/doc/contribute.html
index 551d510288..4135d13652 100644
--- a/doc/contribute.html
+++ b/doc/contribute.html
@@ -552,9 +552,7 @@ $ ./all.bash
-(To build under Windows use all.bat; this also requires
-setting the environment variable GOROOT_BOOTSTRAP to the
-directory holding the Go tree for the bootstrap compiler.)
+(To build under Windows use all.bat)
diff --git a/doc/install-source.html b/doc/install-source.html
index 17b1c9cbb7..3d42a10ad6 100644
--- a/doc/install-source.html
+++ b/doc/install-source.html
@@ -106,23 +106,17 @@ Go does not support CentOS 6 on these systems.
-
Install Go compiler binaries
+
Install Go compiler binaries for bootstrap
The Go toolchain is written in Go. To build it, you need a Go compiler installed.
-The scripts that do the initial build of the tools look for an existing Go tool
-chain in $GOROOT_BOOTSTRAP.
-If unset, the default value of GOROOT_BOOTSTRAP
-is $HOME/go1.4.
-
-
-
-There are many options for the bootstrap toolchain.
-After obtaining one, set GOROOT_BOOTSTRAP to the
-directory containing the unpacked tree.
-For example, $GOROOT_BOOTSTRAP/bin/go should be
-the go command binary for the bootstrap toolchain.
-
+The scripts that do the initial build of the tools look for a "go" command
+in $PATH, so as long as you have Go installed in your
+system and configured in your $PATH, you are ready to build Go
+from source.
+Or if you prefer you can set $GOROOT_BOOTSTRAP to the
+root of a Go installation to use to build the new Go toolchain;
+$GOROOT_BOOTSTRAP/bin/go should be the go command to use.
Bootstrap toolchain from binary release
--
cgit v1.2.3-54-g00ecf
From 5aef51a729f428bfd4b2c28fd2ba7950660608e0 Mon Sep 17 00:00:00 2001
From: Daniel Theophanes
Date: Wed, 18 Mar 2020 10:03:51 -0700
Subject: database/sql: add test for Conn.Validator interface
This addresses comments made by Russ after
https://golang.org/cl/174122 was merged. It adds a test
for the connection validator and renames the interface to just
"Validator".
Change-Id: Iea53e9b250c9be2e86e9b75906e7353e26437c5c
Reviewed-on: https://go-review.googlesource.com/c/go/+/223963
Reviewed-by: Emmanuel Odeke
---
src/database/sql/driver/driver.go | 8 ++++----
src/database/sql/fakedb_test.go | 4 ++--
src/database/sql/sql.go | 4 ++--
src/database/sql/sql_test.go | 31 +++++++++++++++++++++++++++++++
4 files changed, 39 insertions(+), 8 deletions(-)
diff --git a/src/database/sql/driver/driver.go b/src/database/sql/driver/driver.go
index a2b844d71f..76f1bd3aa1 100644
--- a/src/database/sql/driver/driver.go
+++ b/src/database/sql/driver/driver.go
@@ -261,15 +261,15 @@ type SessionResetter interface {
ResetSession(ctx context.Context) error
}
-// ConnectionValidator may be implemented by Conn to allow drivers to
+// Validator may be implemented by Conn to allow drivers to
// signal if a connection is valid or if it should be discarded.
//
// If implemented, drivers may return the underlying error from queries,
// even if the connection should be discarded by the connection pool.
-type ConnectionValidator interface {
- // ValidConnection is called prior to placing the connection into the
+type Validator interface {
+ // IsValid is called prior to placing the connection into the
// connection pool. The connection will be discarded if false is returned.
- ValidConnection() bool
+ IsValid() bool
}
// Result is the result of a query execution.
diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go
index 73dab101b7..b6e9a5707e 100644
--- a/src/database/sql/fakedb_test.go
+++ b/src/database/sql/fakedb_test.go
@@ -396,9 +396,9 @@ func (c *fakeConn) ResetSession(ctx context.Context) error {
return nil
}
-var _ driver.ConnectionValidator = (*fakeConn)(nil)
+var _ driver.Validator = (*fakeConn)(nil)
-func (c *fakeConn) ValidConnection() bool {
+func (c *fakeConn) IsValid() bool {
return !c.isBad()
}
diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go
index 95906b1318..4093ffe1bb 100644
--- a/src/database/sql/sql.go
+++ b/src/database/sql/sql.go
@@ -512,8 +512,8 @@ func (dc *driverConn) validateConnection(needsReset bool) bool {
if needsReset {
dc.needReset = true
}
- if cv, ok := dc.ci.(driver.ConnectionValidator); ok {
- return cv.ValidConnection()
+ if cv, ok := dc.ci.(driver.Validator); ok {
+ return cv.IsValid()
}
return true
}
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index 0fc994d0a1..f08eba93b3 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -1543,6 +1543,37 @@ func TestConnTx(t *testing.T) {
}
}
+// TestConnIsValid verifies that a database connection that should be discarded,
+// is actually discarded and does not re-enter the connection pool.
+// If the IsValid method from *fakeConn is removed, this test will fail.
+func TestConnIsValid(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+
+ db.SetMaxOpenConns(1)
+
+ ctx := context.Background()
+
+ c, err := db.Conn(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = c.Raw(func(raw interface{}) error {
+ dc := raw.(*fakeConn)
+ dc.stickyBad = true
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.Close()
+
+ if len(db.freeConn) > 0 && db.freeConn[0].ci.(*fakeConn).stickyBad {
+ t.Fatal("bad connection returned to pool; expected bad connection to be discarded")
+ }
+}
+
// Tests fix for issue 2542, that we release a lock when querying on
// a closed connection.
func TestIssue2542Deadlock(t *testing.T) {
--
cgit v1.2.3-54-g00ecf
From 82047a080f0aa320e316773fe8bcbb7c7bcd5a1f Mon Sep 17 00:00:00 2001
From: alex-semenyuk
Date: Sun, 29 Mar 2020 08:12:06 +0000
Subject: test, test/fixedbugs, crypto/x509, go/internal/gccgoimporter: fix
typos
Change-Id: Ie2d605ca8cc3bde2e26c6865642ff4e6412cd075
GitHub-Last-Rev: ce5c3ba369b2ef476e7c63e4404baa256584f357
GitHub-Pull-Request: golang/go#38137
Reviewed-on: https://go-review.googlesource.com/c/go/+/226201
Run-TryBot: Ian Lance Taylor
TryBot-Result: Gobot Gobot
Reviewed-by: Ian Lance Taylor
---
src/crypto/x509/x509_test.go | 2 +-
src/go/internal/gccgoimporter/parser.go | 2 +-
test/blank1.go | 2 +-
test/chanlinear.go | 2 +-
test/fixedbugs/issue9521.go | 2 +-
5 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go
index 0c6747d28d..c2f110e87b 100644
--- a/src/crypto/x509/x509_test.go
+++ b/src/crypto/x509/x509_test.go
@@ -1806,7 +1806,7 @@ func TestMD5(t *testing.T) {
}
}
-// certMissingRSANULL contains an RSA public key where the AlgorithmIdentifer
+// certMissingRSANULL contains an RSA public key where the AlgorithmIdentifier
// parameters are omitted rather than being an ASN.1 NULL.
const certMissingRSANULL = `
-----BEGIN CERTIFICATE-----
diff --git a/src/go/internal/gccgoimporter/parser.go b/src/go/internal/gccgoimporter/parser.go
index 9204b004f9..e2ef33f7ae 100644
--- a/src/go/internal/gccgoimporter/parser.go
+++ b/src/go/internal/gccgoimporter/parser.go
@@ -326,7 +326,7 @@ func (p *parser) parseConstValue(pkg *types.Package) (val constant.Value, typ ty
if p.tok == '$' {
p.next()
if p.tok != scanner.Ident {
- p.errorf("expected identifer after '$', got %s (%q)", scanner.TokenString(p.tok), p.lit)
+ p.errorf("expected identifier after '$', got %s (%q)", scanner.TokenString(p.tok), p.lit)
}
}
diff --git a/test/blank1.go b/test/blank1.go
index 1a9f012464..c9a8e6a290 100644
--- a/test/blank1.go
+++ b/test/blank1.go
@@ -4,7 +4,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Test that incorrect uses of the blank identifer are caught.
+// Test that incorrect uses of the blank identifier are caught.
// Does not compile.
package _ // ERROR "invalid package name"
diff --git a/test/chanlinear.go b/test/chanlinear.go
index 55fee4ab9b..4d55586dc8 100644
--- a/test/chanlinear.go
+++ b/test/chanlinear.go
@@ -5,7 +5,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Test that dequeueing from a pending channel doesn't
+// Test that dequeuing from a pending channel doesn't
// take linear time.
package main
diff --git a/test/fixedbugs/issue9521.go b/test/fixedbugs/issue9521.go
index 4e4a55f1e1..a33f0483f3 100644
--- a/test/fixedbugs/issue9521.go
+++ b/test/fixedbugs/issue9521.go
@@ -4,7 +4,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Test that an incorrect use of the blank identifer is caught.
+// Test that an incorrect use of the blank identifier is caught.
// Does not compile.
package main
--
cgit v1.2.3-54-g00ecf
From 534f56b4b2b7e368e27d87af2d7721c4ffde37ba Mon Sep 17 00:00:00 2001
From: Michał Łowicki
Date: Sun, 29 Mar 2020 17:59:08 +0100
Subject: doc: fix path to make.bash
Change-Id: I78c7197b8b93590470a782b492bba177a14d80ec
Reviewed-on: https://go-review.googlesource.com/c/go/+/226340
Reviewed-by: Ian Lance Taylor
---
doc/contribute.html | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/contribute.html b/doc/contribute.html
index 4135d13652..5fefac6bba 100644
--- a/doc/contribute.html
+++ b/doc/contribute.html
@@ -1006,7 +1006,7 @@ followed by run.bash.
In this section, we'll call the directory into which you cloned the Go repository $GODIR.
-The go tool built by $GODIR/make.bash will be installed
+The go tool built by $GODIR/src/make.bash will be installed
in $GODIR/bin/go and you
can invoke it to test your code.
For instance, if you
--
cgit v1.2.3-54-g00ecf
From 7bfac4c3ddde3dd906b344f141a9d09a5f855c77 Mon Sep 17 00:00:00 2001
From: Dmitri Shuralyov
Date: Sat, 28 Mar 2020 23:25:18 -0400
Subject: net/http: use DOMException.message property in error text
Previously, details about the underlying fetch error
were not visible in the net/http error text:
net/http: fetch() failed: