author    Keith Randall <khr@golang.org>  2020-12-09 14:59:40 -0800
committer Keith Randall <khr@golang.org>  2021-02-23 20:05:01 +0000
commit    d2911d76127deaa08644979cec7d990559f0aa54 (patch)
tree      bbb8f48fbcbdebc660deb24f83d0961d4d2caa03 /src/cmd/compile/internal/ssa/gen
parent    d434c2338b11b9ecf19865e8ec3f2721706f29cf (diff)
download  go-d2911d76127deaa08644979cec7d990559f0aa54.tar.gz
          go-d2911d76127deaa08644979cec7d990559f0aa54.zip
cmd/compile: fold MOV*nop and MOV*const
MOV*nop and MOV*reg seem superfluous: they exist only to keep type
information around that would otherwise be thrown away, and it is not
clear what we need it for. The compiler could use a normalization of how
types are represented in SSA, especially after lowering. MOV*nop also
gets in the way of some optimization rules firing, such as load
combining. For now, just fold a MOV*nop of a MOV*const into the
MOV*const. This is certainly safe, as the type info on the MOV*const is
never useful.

R=go1.17

Change-Id: I3630a80afc2455a8e9cd9fde10c7abe05ddc3767
Reviewed-on: https://go-review.googlesource.com/c/go/+/276792
Trust: Keith Randall <khr@golang.org>
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
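For context, a rule like (MOVWnop (MOVWconst [c])) => (MOVWconst [c]) is
compiled by rulegen into a matcher in rewriteARM.go. The following is a
sketch of the shape rulegen typically emits for such a rule, not the
exact generated code; auxIntToInt32 and int32ToAuxInt are the standard
typed-aux helpers in the ssa package:

func rewriteValueARM_OpARMMOVWnop(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MOVWnop (MOVWconst [c]))
	// result: (MOVWconst [c])
	for {
		if v_0.Op != OpARMMOVWconst {
			break // argument is not a constant; rule does not apply
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpARMMOVWconst) // v keeps its own type; only op and aux change
		v.AuxInt = int32ToAuxInt(c)
		return true
	}
	return false
}

Note that v.reset rewrites the MOVWnop value in place, so the resulting
MOVWconst carries the nop's type rather than the original constant's,
which is fine because, as the commit message says, the type on a
MOV*const is never useful.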
Diffstat (limited to 'src/cmd/compile/internal/ssa/gen')
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM.rules      4
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules    4
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS.rules     4
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS64.rules   4
-rw-r--r--  src/cmd/compile/internal/ssa/gen/RISCV64.rules  4
5 files changed, 20 insertions, 0 deletions
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index de0df363e4..cbafd12a4f 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -546,6 +546,10 @@
// MOVWnop doesn't emit instruction, only for ensuring the type.
(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
+// TODO: we should be able to get rid of MOVWnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c])
+
// mul by constant
(MUL x (MOVWconst [c])) && int32(c) == -1 => (RSBconst [0] x)
(MUL _ (MOVWconst [0])) => (MOVWconst [0])
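To make the effect of the ARM rule concrete, here is an illustrative
before/after SSA sketch (value numbers and types are made up for
illustration):

before:
  v1 = MOVWconst <int32> [10]
  v2 = MOVWnop <uint32> v1
after the rule fires (v2 is rewritten in place and keeps its type):
  v2 = MOVWconst <uint32> [10]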
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index a0e2a0d5e2..4531c38a7a 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -1127,6 +1127,10 @@
// MOVDnop doesn't emit instruction, only for ensuring the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+// TODO: we should be able to get rid of MOVDnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
+
// fold constant into arithmatic ops
(ADD x (MOVDconst [c])) => (ADDconst [c] x)
(SUB x (MOVDconst [c])) => (SUBconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index 8ad2c90ac3..bc1ce82940 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -559,6 +559,10 @@
// MOVWnop doesn't emit instruction, only for ensuring the type.
(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
+// TODO: we should be able to get rid of MOVWnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c])
+
// fold constant into arithmatic ops
(ADD x (MOVWconst [c])) => (ADDconst [c] x)
(SUB x (MOVWconst [c])) => (SUBconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index 088c9b1ac4..e3f7633274 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -558,6 +558,10 @@
// MOVVnop doesn't emit instruction, only for ensuring the type.
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)
+// TODO: we should be able to get rid of MOVVnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVVnop (MOVVconst [c])) => (MOVVconst [c])
+
// fold constant into arithmatic ops
(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 15361fd37a..9119ebc0e8 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -673,6 +673,10 @@
// MOVnop does not emit an instruction, only for ensuring the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+// TODO: we should be able to get rid of MOVDnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
+
// Fold constant into immediate instructions where possible.
(ADD (MOVBconst [val]) x) => (ADDI [int64(val)] x)
(ADD (MOVHconst [val]) x) => (ADDI [int64(val)] x)
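For reference, MOV*nop values typically arise from the MOV*reg rule
shown in each hunk above: a same-width conversion keeps the bit pattern
but changes the type. A hypothetical Go source that can give rise to
this pattern on ARM (whether a MOVWnop actually survives to lowering
depends on earlier passes):

package p

// The constant lowers to a MOVWconst, and the same-width int32->uint32
// conversion keeps the bits but changes the type, which is what
// MOVWreg/MOVWnop exists to model. With this CL, the nop folds into
// the constant directly.
func f() uint32 {
	var x int32 = 42
	return uint32(x)
}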