diff options
author | Keith Randall <khr@golang.org> | 2020-03-03 17:56:20 +0000 |
---|---|---|
committer | Keith Randall <khr@golang.org> | 2020-03-04 04:49:54 +0000 |
commit | cd9fd640db419ec81026945eb4f22bfe5ff5a27f (patch) | |
tree | 57da099235cea00c6f1be4b756bf00204b94ca65 /src/cmd/compile/internal/ssa/rewritegeneric.go | |
parent | 24343cb88640ae1e7dbfc4ec2f3ae81fc0aa07c7 (diff) | |
download | go-cd9fd640db419ec81026945eb4f22bfe5ff5a27f.tar.gz go-cd9fd640db419ec81026945eb4f22bfe5ff5a27f.zip |
cmd/compile: don't allow NaNs in floating-point constant ops
Trying this CL again, with a fixed test that allows platforms
to disagree on the exact behavior of converting NaNs.
We store 32-bit floating point constants in a 64-bit field, by
converting that 32-bit float to 64-bit float to store it, and convert
it back to use it.
That works for *almost* all floating-point constants. The exception is
signaling NaNs. The round trip described above means we can't represent
a 32-bit signaling NaN, because conversions strip the signaling bit.
To fix this issue, just forbid NaNs as floating-point constants in SSA
form. This shouldn't affect any real-world code, as people seldom
constant-propagate NaNs (except in test code).
Additionally, NaNs are somewhat underspecified (which of the many NaNs
do you get when dividing 0/0?), so when cross-compiling there's a
danger of using the compiler machine's NaN regime for some math, and
the target machine's NaN regime for other math. Better to use the
target machine's NaN regime always.
Update #36400
Change-Id: Idf203b688a15abceabbd66ba290d4e9f63619ecb
Reviewed-on: https://go-review.googlesource.com/c/go/+/221790
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
Diffstat (limited to 'src/cmd/compile/internal/ssa/rewritegeneric.go')
-rw-r--r-- | src/cmd/compile/internal/ssa/rewritegeneric.go | 28 |
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 9e743838ab..13873b2ac8 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -4734,6 +4734,7 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Div32F (Const32F [c]) (Const32F [d])) + // cond: !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) // result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))]) for { if v_0.Op != OpConst32F { @@ -4744,6 +4745,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool { break } d := v_1.AuxInt + if !(!math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))) { + break + } v.reset(OpConst32F) v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d)) return true @@ -5171,6 +5175,7 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Div64F (Const64F [c]) (Const64F [d])) + // cond: !math.IsNaN(auxTo64F(c) / auxTo64F(d)) // result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))]) for { if v_0.Op != OpConst64F { @@ -5181,6 +5186,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool { break } d := v_1.AuxInt + if !(!math.IsNaN(auxTo64F(c) / auxTo64F(d))) { + break + } v.reset(OpConst64F) v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d)) return true @@ -10240,7 +10248,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { return true } // match: (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) - // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) // result: (Const64F [x]) for { t1 := v.Type @@ -10256,7 +10264,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) { break } 
v.reset(OpConst64F) @@ -10264,7 +10272,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { return true } // match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) - // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) // result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))]) for { t1 := v.Type @@ -10280,7 +10288,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) { break } v.reset(OpConst32F) @@ -13970,6 +13978,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Mul32F (Const32F [c]) (Const32F [d])) + // cond: !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -13981,6 +13990,9 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { continue } d := v_1.AuxInt + if !(!math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))) { + continue + } v.reset(OpConst32F) v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d)) return true @@ -14210,6 +14222,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Mul64F (Const64F [c]) (Const64F [d])) + // cond: !math.IsNaN(auxTo64F(c) * auxTo64F(d)) // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -14221,6 +14234,9 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool { continue } d := v_1.AuxInt + if !(!math.IsNaN(auxTo64F(c) * auxTo64F(d))) { + continue + } v.reset(OpConst64F) v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d)) return true @@ -20966,12 +20982,16 @@ func 
rewriteValuegeneric_OpSlicemask(v *Value) bool { func rewriteValuegeneric_OpSqrt(v *Value) bool { v_0 := v.Args[0] // match: (Sqrt (Const64F [c])) + // cond: !math.IsNaN(math.Sqrt(auxTo64F(c))) // result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))]) for { if v_0.Op != OpConst64F { break } c := v_0.AuxInt + if !(!math.IsNaN(math.Sqrt(auxTo64F(c)))) { + break + } v.reset(OpConst64F) v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(c))) return true |