Diffstat (limited to 'src/cmd/compile')
-rw-r--r--  src/cmd/compile/fmtmap_test.go | 1
-rw-r--r--  src/cmd/compile/internal/amd64/ssa.go | 2
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go | 18
-rw-r--r--  src/cmd/compile/internal/gc/alg.go | 152
-rw-r--r--  src/cmd/compile/internal/gc/align.go | 12
-rw-r--r--  src/cmd/compile/internal/gc/bench_test.go | 12
-rw-r--r--  src/cmd/compile/internal/gc/bexport.go | 43
-rw-r--r--  src/cmd/compile/internal/gc/builtin.go | 431
-rw-r--r--  src/cmd/compile/internal/gc/builtin/runtime.go | 1
-rw-r--r--  src/cmd/compile/internal/gc/closure.go | 8
-rw-r--r--  src/cmd/compile/internal/gc/const.go | 71
-rw-r--r--  src/cmd/compile/internal/gc/dcl.go | 2
-rw-r--r--  src/cmd/compile/internal/gc/dwinl.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/embed.go | 273
-rw-r--r--  src/cmd/compile/internal/gc/esc.go | 47
-rw-r--r--  src/cmd/compile/internal/gc/escape.go | 47
-rw-r--r--  src/cmd/compile/internal/gc/export.go | 12
-rw-r--r--  src/cmd/compile/internal/gc/float_test.go | 19
-rw-r--r--  src/cmd/compile/internal/gc/fmt.go | 27
-rw-r--r--  src/cmd/compile/internal/gc/go.go | 21
-rw-r--r--  src/cmd/compile/internal/gc/gsubr.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/iexport.go | 21
-rw-r--r--  src/cmd/compile/internal/gc/iimport.go | 10
-rw-r--r--  src/cmd/compile/internal/gc/inl.go | 273
-rw-r--r--  src/cmd/compile/internal/gc/inl_test.go | 3
-rw-r--r--  src/cmd/compile/internal/gc/lex.go | 7
-rw-r--r--  src/cmd/compile/internal/gc/main.go | 119
-rw-r--r--  src/cmd/compile/internal/gc/noder.go | 157
-rw-r--r--  src/cmd/compile/internal/gc/obj.go | 143
-rw-r--r--  src/cmd/compile/internal/gc/order.go | 13
-rw-r--r--  src/cmd/compile/internal/gc/pgen.go | 18
-rw-r--r--  src/cmd/compile/internal/gc/plive.go | 9
-rw-r--r--  src/cmd/compile/internal/gc/range.go | 17
-rw-r--r--  src/cmd/compile/internal/gc/reflect.go | 3
-rw-r--r--  src/cmd/compile/internal/gc/scope.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/sinit.go | 27
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go | 400
-rw-r--r--  src/cmd/compile/internal/gc/subr.go | 189
-rw-r--r--  src/cmd/compile/internal/gc/swt.go | 29
-rw-r--r--  src/cmd/compile/internal/gc/syntax.go | 98
-rw-r--r--  src/cmd/compile/internal/gc/typecheck.go | 71
-rw-r--r--  src/cmd/compile/internal/gc/universe.go | 10
-rw-r--r--  src/cmd/compile/internal/gc/walk.go | 179
-rw-r--r--  src/cmd/compile/internal/logopt/logopt_test.go | 3
-rw-r--r--  src/cmd/compile/internal/ppc64/ssa.go | 55
-rw-r--r--  src/cmd/compile/internal/riscv64/ggen.go | 10
-rw-r--r--  src/cmd/compile/internal/riscv64/ssa.go | 52
-rw-r--r--  src/cmd/compile/internal/s390x/ssa.go | 8
-rw-r--r--  src/cmd/compile/internal/ssa/addressingmodes.go | 8
-rw-r--r--  src/cmd/compile/internal/ssa/branchelim.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/compile.go | 12
-rw-r--r--  src/cmd/compile/internal/ssa/config.go | 15
-rw-r--r--  src/cmd/compile/internal/ssa/decompose.go | 87
-rw-r--r--  src/cmd/compile/internal/ssa/expand_calls.go | 786
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go | 4
-rw-r--r--  src/cmd/compile/internal/ssa/flagalloc.go | 5
-rw-r--r--  src/cmd/compile/internal/ssa/func.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386.rules | 12
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386Ops.go | 14
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules | 14
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM.rules | 34
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules | 182
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64Ops.go | 6
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS.rules | 11
-rw-r--r--  src/cmd/compile/internal/ssa/gen/MIPS64.rules | 6
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64.rules | 46
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 23
-rw-r--r--  src/cmd/compile/internal/ssa/gen/RISCV64.rules | 127
-rw-r--r--  src/cmd/compile/internal/ssa/gen/RISCV64Ops.go | 56
-rw-r--r--  src/cmd/compile/internal/ssa/gen/S390X.rules | 3
-rw-r--r--  src/cmd/compile/internal/ssa/gen/S390XOps.go | 4
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec64.rules | 137
-rw-r--r--  src/cmd/compile/internal/ssa/gen/generic.rules | 44
-rw-r--r--  src/cmd/compile/internal/ssa/gen/genericOps.go | 22
-rw-r--r--  src/cmd/compile/internal/ssa/gen/rulegen.go | 109
-rw-r--r--  src/cmd/compile/internal/ssa/html.go | 13
-rw-r--r--  src/cmd/compile/internal/ssa/op.go | 47
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go | 818
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc.go | 69
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite.go | 181
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite386.go | 88
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go | 54
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM.go | 421
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go | 394
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS.go | 22
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteMIPS64.go | 12
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64.go | 496
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteRISCV64.go | 1043
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteS390X.go | 6
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite_test.go | 181
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec64.go | 796
-rw-r--r--  src/cmd/compile/internal/ssa/rewritegeneric.go | 220
-rw-r--r--  src/cmd/compile/internal/ssa/shortcircuit.go | 5
-rw-r--r--  src/cmd/compile/internal/ssa/value.go | 3
-rw-r--r--  src/cmd/compile/internal/ssa/writebarrier.go | 6
-rw-r--r--  src/cmd/compile/internal/syntax/parser.go | 10
-rw-r--r--  src/cmd/compile/internal/types/type.go | 14
-rw-r--r--  src/cmd/compile/internal/x86/387.go | 403
-rw-r--r--  src/cmd/compile/internal/x86/galign.go | 16
-rw-r--r--  src/cmd/compile/internal/x86/ssa.go | 2
101 files changed, 7061 insertions(+), 3167 deletions(-)
diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go
index 179c60187f..f8c33ec1f9 100644
--- a/src/cmd/compile/fmtmap_test.go
+++ b/src/cmd/compile/fmtmap_test.go
@@ -136,6 +136,7 @@ var knownFormats = map[string]string{
"cmd/compile/internal/types.EType %s": "",
"cmd/compile/internal/types.EType %v": "",
"cmd/internal/obj.ABI %v": "",
+ "cmd/internal/src.XPos %v": "",
"error %v": "",
"float64 %.2f": "",
"float64 %.3f": "",
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 4ac877986c..f30a47b903 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -1210,7 +1210,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
- case ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock:
+ case ssa.OpAMD64ANDBlock, ssa.OpAMD64ANDLlock, ssa.OpAMD64ORBlock, ssa.OpAMD64ORLlock:
s.Prog(x86.ALOCK)
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 1d6ea6b9d8..5c695ef84c 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -688,15 +688,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p5.To.Reg = out
gc.Patch(p2, p5)
case ssa.OpARM64LoweredAtomicAnd8,
- ssa.OpARM64LoweredAtomicOr8:
- // LDAXRB (Rarg0), Rout
+ ssa.OpARM64LoweredAtomicAnd32,
+ ssa.OpARM64LoweredAtomicOr8,
+ ssa.OpARM64LoweredAtomicOr32:
+ // LDAXRB/LDAXRW (Rarg0), Rout
// AND/OR Rarg1, Rout
- // STLXRB Rout, (Rarg0), Rtmp
+ // STLXRB/STLXRW Rout, (Rarg0), Rtmp
// CBNZ Rtmp, -3(PC)
+ ld := arm64.ALDAXRB
+ st := arm64.ASTLXRB
+ if v.Op == ssa.OpARM64LoweredAtomicAnd32 || v.Op == ssa.OpARM64LoweredAtomicOr32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
- p := s.Prog(arm64.ALDAXRB)
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
@@ -706,7 +714,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = out
- p2 := s.Prog(arm64.ASTLXRB)
+ p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = out
p2.To.Type = obj.TYPE_MEM
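
Note: the sequence above is a classic load-linked/store-conditional retry loop: load-exclusive the word, apply the AND/OR, store-exclusive, and branch back if the exclusive store failed. A minimal sketch of the same semantics in portable Go, written as a compare-and-swap loop (the helper name atomicAnd32 is illustrative, not part of this change):

    package main

    import "sync/atomic"

    // atomicAnd32 atomically ANDs mask into *addr and returns the new value,
    // mirroring the LDAXRW / AND / STLXRW / CBNZ sequence emitted above.
    func atomicAnd32(addr *uint32, mask uint32) uint32 {
    	for {
    		old := atomic.LoadUint32(addr)
    		if atomic.CompareAndSwapUint32(addr, old, old&mask) {
    			return old & mask
    		}
    	}
    }
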
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
index 6302b88f59..2f7fa27bb9 100644
--- a/src/cmd/compile/internal/gc/alg.go
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -282,7 +282,7 @@ func genhash(t *types.Type) *obj.LSym {
}
sym := typesymprefix(".hash", t)
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
@@ -374,7 +374,7 @@ func genhash(t *types.Type) *obj.LSym {
r.List.Append(nh)
fn.Nbody.Append(r)
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
dumplist("genhash body", fn.Nbody)
}
@@ -509,7 +509,7 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
sym := typesymprefix(".eq", t)
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
fmt.Printf("geneq %v\n", t)
}
@@ -529,6 +529,10 @@ func geneq(t *types.Type) *obj.LSym {
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname)
+ nr := asNode(tfn.Type.Results().Field(0).Nname)
+
+ // Label to jump to if an equality test fails.
+ neq := autolabel(".neq")
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
@@ -555,13 +559,13 @@ func geneq(t *types.Type) *obj.LSym {
// for i := 0; i < nelem; i++ {
// if eq(p[i], q[i]) {
// } else {
- // return
+ // goto neq
// }
// }
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
- checkAll := func(unroll int64, eq func(pi, qi *Node) *Node) {
+ checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i *Node) *Node {
// pi := p[i]
@@ -576,37 +580,38 @@ func geneq(t *types.Type) *obj.LSym {
}
if nelem <= unroll {
+ if last {
+ // Do last comparison in a different manner.
+ nelem--
+ }
// Generate a series of checks.
- var cond *Node
for i := int64(0); i < nelem; i++ {
- c := nodintconst(i)
- check := checkIdx(c)
- if cond == nil {
- cond = check
- continue
- }
- cond = nod(OANDAND, cond, check)
+ // if check {} else { goto neq }
+ nif := nod(OIF, checkIdx(nodintconst(i)), nil)
+ nif.Rlist.Append(nodSym(OGOTO, nil, neq))
+ fn.Nbody.Append(nif)
+ }
+ if last {
+ fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
+ }
+ } else {
+ // Generate a for loop.
+ // for i := 0; i < nelem; i++
+ i := temp(types.Types[TINT])
+ init := nod(OAS, i, nodintconst(0))
+ cond := nod(OLT, i, nodintconst(nelem))
+ post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
+ loop := nod(OFOR, cond, post)
+ loop.Ninit.Append(init)
+ // if eq(pi, qi) {} else { goto neq }
+ nif := nod(OIF, checkIdx(i), nil)
+ nif.Rlist.Append(nodSym(OGOTO, nil, neq))
+ loop.Nbody.Append(nif)
+ fn.Nbody.Append(loop)
+ if last {
+ fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
}
- nif := nod(OIF, cond, nil)
- nif.Rlist.Append(nod(ORETURN, nil, nil))
- fn.Nbody.Append(nif)
- return
}
-
- // Generate a for loop.
- // for i := 0; i < nelem; i++
- i := temp(types.Types[TINT])
- init := nod(OAS, i, nodintconst(0))
- cond := nod(OLT, i, nodintconst(nelem))
- post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
- loop := nod(OFOR, cond, post)
- loop.Ninit.Append(init)
- // if eq(pi, qi) {} else { return }
- check := checkIdx(i)
- nif := nod(OIF, check, nil)
- nif.Rlist.Append(nod(ORETURN, nil, nil))
- loop.Nbody.Append(nif)
- fn.Nbody.Append(loop)
}
switch t.Elem().Etype {
@@ -614,32 +619,28 @@ func geneq(t *types.Type) *obj.LSym {
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
- checkAll(3, func(pi, qi *Node) *Node {
+ checkAll(3, false, func(pi, qi *Node) *Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
- checkAll(1, func(pi, qi *Node) *Node {
+ checkAll(1, true, func(pi, qi *Node) *Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case TFLOAT32, TFLOAT64:
- checkAll(2, func(pi, qi *Node) *Node {
+ checkAll(2, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
- checkAll(1, func(pi, qi *Node) *Node {
+ checkAll(1, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
}
- // return true
- ret := nod(ORETURN, nil, nil)
- ret.List.Append(nodbool(true))
- fn.Nbody.Append(ret)
case TSTRUCT:
// Build a list of conditions to satisfy.
@@ -717,22 +718,42 @@ func geneq(t *types.Type) *obj.LSym {
flatConds = append(flatConds, c...)
}
- var cond *Node
if len(flatConds) == 0 {
- cond = nodbool(true)
+ fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
} else {
- cond = flatConds[0]
- for _, c := range flatConds[1:] {
- cond = nod(OANDAND, cond, c)
+ for _, c := range flatConds[:len(flatConds)-1] {
+ // if cond {} else { goto neq }
+ n := nod(OIF, c, nil)
+ n.Rlist.Append(nodSym(OGOTO, nil, neq))
+ fn.Nbody.Append(n)
}
+ fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
}
+ }
- ret := nod(ORETURN, nil, nil)
- ret.List.Append(cond)
- fn.Nbody.Append(ret)
+ // ret:
+ // return
+ ret := autolabel(".ret")
+ fn.Nbody.Append(nodSym(OLABEL, nil, ret))
+ fn.Nbody.Append(nod(ORETURN, nil, nil))
+
+ // neq:
+ // r = false
+ // return (or goto ret)
+ fn.Nbody.Append(nodSym(OLABEL, nil, neq))
+ fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
+ if EqCanPanic(t) || hasCall(fn) {
+ // Epilogue is large, so share it with the equal case.
+ fn.Nbody.Append(nodSym(OGOTO, nil, ret))
+ } else {
+ // Epilogue is small, so don't bother sharing.
+ fn.Nbody.Append(nod(ORETURN, nil, nil))
}
+ // TODO(khr): the epilogue size detection condition above isn't perfect.
+ // We should really do a generic CL that shares epilogues across
+ // the board. See #24936.
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
dumplist("geneq body", fn.Nbody)
}
@@ -762,6 +783,39 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
+func hasCall(n *Node) bool {
+ if n.Op == OCALL || n.Op == OCALLFUNC {
+ return true
+ }
+ if n.Left != nil && hasCall(n.Left) {
+ return true
+ }
+ if n.Right != nil && hasCall(n.Right) {
+ return true
+ }
+ for _, x := range n.Ninit.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.Nbody.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.List.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.Rlist.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ return false
+}
+
// eqfield returns the node
// p.field == q.field
func eqfield(p *Node, q *Node, field *types.Sym) *Node {
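
Note: to see the effect of the goto-based rewrite in geneq, here is roughly the source-level shape of the equality function it now constructs for a small array (a hand-written sketch of the generated IR, not literal compiler output):

    // Sketch for a [2]float64: per-element checks jump to neq on failure,
    // the final comparison assigns the result directly, and the epilogue
    // is shared via the ret/neq labels appended above.
    func eqArray(p, q *[2]float64) (r bool) {
    	if p[0] == q[0] {
    	} else {
    		goto neq
    	}
    	r = p[1] == q[1]
    ret:
    	return
    neq:
    	r = false
    	goto ret // geneq may emit a plain return here when the epilogue is small
    }
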
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
index 5af403afa3..a3a0c8fce8 100644
--- a/src/cmd/compile/internal/gc/align.go
+++ b/src/cmd/compile/internal/gc/align.go
@@ -86,7 +86,7 @@ func expandiface(t *types.Type) {
sort.Sort(methcmp(methods))
if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
- yyerror("interface too large")
+ yyerrorl(typePos(t), "interface too large")
}
for i, m := range methods {
m.Offset = int64(i) * int64(Widthptr)
@@ -150,7 +150,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
maxwidth = 1<<31 - 1
}
if o >= maxwidth {
- yyerror("type %L too large", errtype)
+ yyerrorl(typePos(errtype), "type %L too large", errtype)
o = 8 // small but nonzero
}
}
@@ -199,7 +199,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
}
*path = append(*path, t)
- if findTypeLoop(asNode(t.Nod).Name.Param.Ntype.Type, path) {
+ if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
return true
}
*path = (*path)[:len(*path)-1]
@@ -381,7 +381,7 @@ func dowidth(t *types.Type) {
t1 := t.ChanArgs()
dowidth(t1) // just in case
if t1.Elem().Width >= 1<<16 {
- yyerror("channel element type too large (>64kB)")
+ yyerrorl(typePos(t1), "channel element type too large (>64kB)")
}
w = 1 // anything will do
@@ -414,7 +414,7 @@ func dowidth(t *types.Type) {
if t.Elem().Width != 0 {
cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
if uint64(t.NumElem()) > cap {
- yyerror("type %L larger than address space", t)
+ yyerrorl(typePos(t), "type %L larger than address space", t)
}
}
w = t.NumElem() * t.Elem().Width
@@ -456,7 +456,7 @@ func dowidth(t *types.Type) {
}
if Widthptr == 4 && w != int64(int32(w)) {
- yyerror("type %v too large", t)
+ yyerrorl(typePos(t), "type %v too large", t)
}
t.Width = w
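
Note: each of these diagnostics now goes through yyerrorl with typePos, so the error is attributed to the offending type's declaration rather than to whatever position the compiler is currently at. An illustrative program (intentionally rejected) that trips the channel-element check:

    package main

    // Fails to compile with "channel element type too large (>64kB)";
    // after this change the message points at the channel type below.
    var c chan [1 << 16]byte

    func main() { _ = c }
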
diff --git a/src/cmd/compile/internal/gc/bench_test.go b/src/cmd/compile/internal/gc/bench_test.go
index a2887f2f7b..8c4288128f 100644
--- a/src/cmd/compile/internal/gc/bench_test.go
+++ b/src/cmd/compile/internal/gc/bench_test.go
@@ -7,6 +7,7 @@ package gc
import "testing"
var globl int64
+var globl32 int32
func BenchmarkLoadAdd(b *testing.B) {
x := make([]int64, 1024)
@@ -42,6 +43,17 @@ func BenchmarkModify(b *testing.B) {
}
}
+func BenchmarkMullImm(b *testing.B) {
+ x := make([]int32, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int32
+ for i := range x {
+ s += x[i] * 100
+ }
+ globl32 = s
+ }
+}
+
func BenchmarkConstModify(b *testing.B) {
a := make([]int64, 1024)
for i := 0; i < b.N; i++ {
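
Note: the new benchmark measures multiplication by a small immediate. One common strength reduction that rules changes elsewhere in this CL can enable (whether a given target emits exactly this sequence is an assumption): since 100 = 64 + 32 + 4,

    // mul100 computes x*100 as shifts and adds: (x<<6) + (x<<5) + (x<<2).
    func mul100(x int32) int32 { return x<<6 + x<<5 + x<<2 }
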
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
index 5ced66c0da..10f21f86df 100644
--- a/src/cmd/compile/internal/gc/bexport.go
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -81,11 +81,6 @@ func (p *exporter) markType(t *types.Type) {
}
}
-// deltaNewFile is a magic line delta offset indicating a new file.
-// We use -64 because it is rare; see issue 20080 and CL 41619.
-// -64 is the smallest int that fits in a single byte as a varint.
-const deltaNewFile = -64
-
// ----------------------------------------------------------------------------
// Export format
@@ -126,30 +121,6 @@ const (
aliasTag
)
-// untype returns the "pseudo" untyped type for a Ctype (import/export use only).
-// (we can't use a pre-initialized array because we must be sure all types are
-// set up)
-func untype(ctype Ctype) *types.Type {
- switch ctype {
- case CTINT:
- return types.Idealint
- case CTRUNE:
- return types.Idealrune
- case CTFLT:
- return types.Idealfloat
- case CTCPLX:
- return types.Idealcomplex
- case CTSTR:
- return types.Idealstring
- case CTBOOL:
- return types.Idealbool
- case CTNIL:
- return types.Types[TNIL]
- }
- Fatalf("exporter: unknown Ctype")
- return nil
-}
-
var predecl []*types.Type // initialized lazily
func predeclared() []*types.Type {
@@ -184,13 +155,13 @@ func predeclared() []*types.Type {
types.Errortype,
// untyped types
- untype(CTBOOL),
- untype(CTINT),
- untype(CTRUNE),
- untype(CTFLT),
- untype(CTCPLX),
- untype(CTSTR),
- untype(CTNIL),
+ types.UntypedBool,
+ types.UntypedInt,
+ types.UntypedRune,
+ types.UntypedFloat,
+ types.UntypedComplex,
+ types.UntypedString,
+ types.Types[TNIL],
// package unsafe
types.Types[TUNSAFEPTR],
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index da7b107bfe..fd95b657b2 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -44,6 +44,7 @@ var runtimeDecls = [...]struct {
{"printcomplex", funcTag, 27},
{"printstring", funcTag, 29},
{"printpointer", funcTag, 30},
+ {"printuintptr", funcTag, 31},
{"printiface", funcTag, 30},
{"printeface", funcTag, 30},
{"printslice", funcTag, 30},
@@ -51,134 +52,134 @@ var runtimeDecls = [...]struct {
{"printsp", funcTag, 9},
{"printlock", funcTag, 9},
{"printunlock", funcTag, 9},
- {"concatstring2", funcTag, 33},
- {"concatstring3", funcTag, 34},
- {"concatstring4", funcTag, 35},
- {"concatstring5", funcTag, 36},
- {"concatstrings", funcTag, 38},
- {"cmpstring", funcTag, 39},
- {"intstring", funcTag, 42},
- {"slicebytetostring", funcTag, 43},
- {"slicebytetostringtmp", funcTag, 44},
- {"slicerunetostring", funcTag, 47},
- {"stringtoslicebyte", funcTag, 49},
- {"stringtoslicerune", funcTag, 52},
- {"slicecopy", funcTag, 53},
- {"decoderune", funcTag, 54},
- {"countrunes", funcTag, 55},
- {"convI2I", funcTag, 56},
- {"convT16", funcTag, 57},
- {"convT32", funcTag, 57},
- {"convT64", funcTag, 57},
- {"convTstring", funcTag, 57},
- {"convTslice", funcTag, 57},
- {"convT2E", funcTag, 58},
- {"convT2Enoptr", funcTag, 58},
- {"convT2I", funcTag, 58},
- {"convT2Inoptr", funcTag, 58},
- {"assertE2I", funcTag, 56},
- {"assertE2I2", funcTag, 59},
- {"assertI2I", funcTag, 56},
- {"assertI2I2", funcTag, 59},
- {"panicdottypeE", funcTag, 60},
- {"panicdottypeI", funcTag, 60},
- {"panicnildottype", funcTag, 61},
- {"ifaceeq", funcTag, 63},
- {"efaceeq", funcTag, 63},
- {"fastrand", funcTag, 65},
- {"makemap64", funcTag, 67},
- {"makemap", funcTag, 68},
- {"makemap_small", funcTag, 69},
- {"mapaccess1", funcTag, 70},
- {"mapaccess1_fast32", funcTag, 71},
- {"mapaccess1_fast64", funcTag, 71},
- {"mapaccess1_faststr", funcTag, 71},
- {"mapaccess1_fat", funcTag, 72},
- {"mapaccess2", funcTag, 73},
- {"mapaccess2_fast32", funcTag, 74},
- {"mapaccess2_fast64", funcTag, 74},
- {"mapaccess2_faststr", funcTag, 74},
- {"mapaccess2_fat", funcTag, 75},
- {"mapassign", funcTag, 70},
- {"mapassign_fast32", funcTag, 71},
- {"mapassign_fast32ptr", funcTag, 71},
- {"mapassign_fast64", funcTag, 71},
- {"mapassign_fast64ptr", funcTag, 71},
- {"mapassign_faststr", funcTag, 71},
- {"mapiterinit", funcTag, 76},
- {"mapdelete", funcTag, 76},
- {"mapdelete_fast32", funcTag, 77},
- {"mapdelete_fast64", funcTag, 77},
- {"mapdelete_faststr", funcTag, 77},
- {"mapiternext", funcTag, 78},
- {"mapclear", funcTag, 79},
- {"makechan64", funcTag, 81},
- {"makechan", funcTag, 82},
- {"chanrecv1", funcTag, 84},
- {"chanrecv2", funcTag, 85},
- {"chansend1", funcTag, 87},
+ {"concatstring2", funcTag, 34},
+ {"concatstring3", funcTag, 35},
+ {"concatstring4", funcTag, 36},
+ {"concatstring5", funcTag, 37},
+ {"concatstrings", funcTag, 39},
+ {"cmpstring", funcTag, 40},
+ {"intstring", funcTag, 43},
+ {"slicebytetostring", funcTag, 44},
+ {"slicebytetostringtmp", funcTag, 45},
+ {"slicerunetostring", funcTag, 48},
+ {"stringtoslicebyte", funcTag, 50},
+ {"stringtoslicerune", funcTag, 53},
+ {"slicecopy", funcTag, 54},
+ {"decoderune", funcTag, 55},
+ {"countrunes", funcTag, 56},
+ {"convI2I", funcTag, 57},
+ {"convT16", funcTag, 58},
+ {"convT32", funcTag, 58},
+ {"convT64", funcTag, 58},
+ {"convTstring", funcTag, 58},
+ {"convTslice", funcTag, 58},
+ {"convT2E", funcTag, 59},
+ {"convT2Enoptr", funcTag, 59},
+ {"convT2I", funcTag, 59},
+ {"convT2Inoptr", funcTag, 59},
+ {"assertE2I", funcTag, 57},
+ {"assertE2I2", funcTag, 60},
+ {"assertI2I", funcTag, 57},
+ {"assertI2I2", funcTag, 60},
+ {"panicdottypeE", funcTag, 61},
+ {"panicdottypeI", funcTag, 61},
+ {"panicnildottype", funcTag, 62},
+ {"ifaceeq", funcTag, 64},
+ {"efaceeq", funcTag, 64},
+ {"fastrand", funcTag, 66},
+ {"makemap64", funcTag, 68},
+ {"makemap", funcTag, 69},
+ {"makemap_small", funcTag, 70},
+ {"mapaccess1", funcTag, 71},
+ {"mapaccess1_fast32", funcTag, 72},
+ {"mapaccess1_fast64", funcTag, 72},
+ {"mapaccess1_faststr", funcTag, 72},
+ {"mapaccess1_fat", funcTag, 73},
+ {"mapaccess2", funcTag, 74},
+ {"mapaccess2_fast32", funcTag, 75},
+ {"mapaccess2_fast64", funcTag, 75},
+ {"mapaccess2_faststr", funcTag, 75},
+ {"mapaccess2_fat", funcTag, 76},
+ {"mapassign", funcTag, 71},
+ {"mapassign_fast32", funcTag, 72},
+ {"mapassign_fast32ptr", funcTag, 72},
+ {"mapassign_fast64", funcTag, 72},
+ {"mapassign_fast64ptr", funcTag, 72},
+ {"mapassign_faststr", funcTag, 72},
+ {"mapiterinit", funcTag, 77},
+ {"mapdelete", funcTag, 77},
+ {"mapdelete_fast32", funcTag, 78},
+ {"mapdelete_fast64", funcTag, 78},
+ {"mapdelete_faststr", funcTag, 78},
+ {"mapiternext", funcTag, 79},
+ {"mapclear", funcTag, 80},
+ {"makechan64", funcTag, 82},
+ {"makechan", funcTag, 83},
+ {"chanrecv1", funcTag, 85},
+ {"chanrecv2", funcTag, 86},
+ {"chansend1", funcTag, 88},
{"closechan", funcTag, 30},
- {"writeBarrier", varTag, 89},
- {"typedmemmove", funcTag, 90},
- {"typedmemclr", funcTag, 91},
- {"typedslicecopy", funcTag, 92},
- {"selectnbsend", funcTag, 93},
- {"selectnbrecv", funcTag, 94},
- {"selectnbrecv2", funcTag, 96},
- {"selectsetpc", funcTag, 97},
- {"selectgo", funcTag, 98},
+ {"writeBarrier", varTag, 90},
+ {"typedmemmove", funcTag, 91},
+ {"typedmemclr", funcTag, 92},
+ {"typedslicecopy", funcTag, 93},
+ {"selectnbsend", funcTag, 94},
+ {"selectnbrecv", funcTag, 95},
+ {"selectnbrecv2", funcTag, 97},
+ {"selectsetpc", funcTag, 98},
+ {"selectgo", funcTag, 99},
{"block", funcTag, 9},
- {"makeslice", funcTag, 99},
- {"makeslice64", funcTag, 100},
- {"makeslicecopy", funcTag, 101},
- {"growslice", funcTag, 103},
- {"memmove", funcTag, 104},
- {"memclrNoHeapPointers", funcTag, 105},
- {"memclrHasPointers", funcTag, 105},
- {"memequal", funcTag, 106},
- {"memequal0", funcTag, 107},
- {"memequal8", funcTag, 107},
- {"memequal16", funcTag, 107},
- {"memequal32", funcTag, 107},
- {"memequal64", funcTag, 107},
- {"memequal128", funcTag, 107},
- {"f32equal", funcTag, 108},
- {"f64equal", funcTag, 108},
- {"c64equal", funcTag, 108},
- {"c128equal", funcTag, 108},
- {"strequal", funcTag, 108},
- {"interequal", funcTag, 108},
- {"nilinterequal", funcTag, 108},
- {"memhash", funcTag, 109},
- {"memhash0", funcTag, 110},
- {"memhash8", funcTag, 110},
- {"memhash16", funcTag, 110},
- {"memhash32", funcTag, 110},
- {"memhash64", funcTag, 110},
- {"memhash128", funcTag, 110},
- {"f32hash", funcTag, 110},
- {"f64hash", funcTag, 110},
- {"c64hash", funcTag, 110},
- {"c128hash", funcTag, 110},
- {"strhash", funcTag, 110},
- {"interhash", funcTag, 110},
- {"nilinterhash", funcTag, 110},
- {"int64div", funcTag, 111},
- {"uint64div", funcTag, 112},
- {"int64mod", funcTag, 111},
- {"uint64mod", funcTag, 112},
- {"float64toint64", funcTag, 113},
- {"float64touint64", funcTag, 114},
- {"float64touint32", funcTag, 115},
- {"int64tofloat64", funcTag, 116},
- {"uint64tofloat64", funcTag, 117},
- {"uint32tofloat64", funcTag, 118},
- {"complex128div", funcTag, 119},
- {"racefuncenter", funcTag, 120},
+ {"makeslice", funcTag, 100},
+ {"makeslice64", funcTag, 101},
+ {"makeslicecopy", funcTag, 102},
+ {"growslice", funcTag, 104},
+ {"memmove", funcTag, 105},
+ {"memclrNoHeapPointers", funcTag, 106},
+ {"memclrHasPointers", funcTag, 106},
+ {"memequal", funcTag, 107},
+ {"memequal0", funcTag, 108},
+ {"memequal8", funcTag, 108},
+ {"memequal16", funcTag, 108},
+ {"memequal32", funcTag, 108},
+ {"memequal64", funcTag, 108},
+ {"memequal128", funcTag, 108},
+ {"f32equal", funcTag, 109},
+ {"f64equal", funcTag, 109},
+ {"c64equal", funcTag, 109},
+ {"c128equal", funcTag, 109},
+ {"strequal", funcTag, 109},
+ {"interequal", funcTag, 109},
+ {"nilinterequal", funcTag, 109},
+ {"memhash", funcTag, 110},
+ {"memhash0", funcTag, 111},
+ {"memhash8", funcTag, 111},
+ {"memhash16", funcTag, 111},
+ {"memhash32", funcTag, 111},
+ {"memhash64", funcTag, 111},
+ {"memhash128", funcTag, 111},
+ {"f32hash", funcTag, 111},
+ {"f64hash", funcTag, 111},
+ {"c64hash", funcTag, 111},
+ {"c128hash", funcTag, 111},
+ {"strhash", funcTag, 111},
+ {"interhash", funcTag, 111},
+ {"nilinterhash", funcTag, 111},
+ {"int64div", funcTag, 112},
+ {"uint64div", funcTag, 113},
+ {"int64mod", funcTag, 112},
+ {"uint64mod", funcTag, 113},
+ {"float64toint64", funcTag, 114},
+ {"float64touint64", funcTag, 115},
+ {"float64touint32", funcTag, 116},
+ {"int64tofloat64", funcTag, 117},
+ {"uint64tofloat64", funcTag, 118},
+ {"uint32tofloat64", funcTag, 119},
+ {"complex128div", funcTag, 120},
+ {"racefuncenter", funcTag, 31},
{"racefuncenterfp", funcTag, 9},
{"racefuncexit", funcTag, 9},
- {"raceread", funcTag, 120},
- {"racewrite", funcTag, 120},
+ {"raceread", funcTag, 31},
+ {"racewrite", funcTag, 31},
{"racereadrange", funcTag, 121},
{"racewriterange", funcTag, 121},
{"msanread", funcTag, 121},
@@ -233,96 +234,96 @@ func runtimeTypes() []*types.Type {
typs[28] = types.Types[TSTRING]
typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
- typs[31] = types.NewArray(typs[0], 32)
- typs[32] = types.NewPtr(typs[31])
- typs[33] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[34] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[35] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[36] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[37] = types.NewSlice(typs[28])
- typs[38] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[37])}, []*Node{anonfield(typs[28])})
- typs[39] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
- typs[40] = types.NewArray(typs[0], 4)
- typs[41] = types.NewPtr(typs[40])
- typs[42] = functype(nil, []*Node{anonfield(typs[41]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
- typs[43] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
- typs[44] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
- typs[45] = types.Runetype
- typs[46] = types.NewSlice(typs[45])
- typs[47] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[46])}, []*Node{anonfield(typs[28])})
- typs[48] = types.NewSlice(typs[0])
- typs[49] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28])}, []*Node{anonfield(typs[48])})
- typs[50] = types.NewArray(typs[45], 32)
- typs[51] = types.NewPtr(typs[50])
- typs[52] = functype(nil, []*Node{anonfield(typs[51]), anonfield(typs[28])}, []*Node{anonfield(typs[46])})
- typs[53] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
- typs[54] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[45]), anonfield(typs[15])})
- typs[55] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
- typs[56] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
- typs[57] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
- typs[58] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
- typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
- typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
- typs[61] = functype(nil, []*Node{anonfield(typs[1])}, nil)
- typs[62] = types.NewPtr(typs[5])
- typs[63] = functype(nil, []*Node{anonfield(typs[62]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
- typs[64] = types.Types[TUINT32]
- typs[65] = functype(nil, nil, []*Node{anonfield(typs[64])})
- typs[66] = types.NewMap(typs[2], typs[2])
- typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
- typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
- typs[69] = functype(nil, nil, []*Node{anonfield(typs[66])})
- typs[70] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
- typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
- typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
- typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, nil)
- typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, nil)
- typs[78] = functype(nil, []*Node{anonfield(typs[3])}, nil)
- typs[79] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66])}, nil)
- typs[80] = types.NewChan(typs[2], types.Cboth)
- typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[80])})
- typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[80])})
- typs[83] = types.NewChan(typs[2], types.Crecv)
- typs[84] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, nil)
- typs[85] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[86] = types.NewChan(typs[2], types.Csend)
- typs[87] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, nil)
- typs[88] = types.NewArray(typs[0], 3)
- typs[89] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[88]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
- typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
- typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
- typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
- typs[93] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[94] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
- typs[95] = types.NewPtr(typs[6])
- typs[96] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[95]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
- typs[97] = functype(nil, []*Node{anonfield(typs[62])}, nil)
- typs[98] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[62]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
- typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
- typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
- typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
- typs[102] = types.NewSlice(typs[2])
- typs[103] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[102]), anonfield(typs[15])}, []*Node{anonfield(typs[102])})
- typs[104] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
- typs[105] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
- typs[106] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
- typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[108] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
- typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
- typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
- typs[111] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
- typs[112] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
- typs[113] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
- typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
- typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[64])})
- typs[116] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
- typs[117] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
- typs[118] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[20])})
- typs[119] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
- typs[120] = functype(nil, []*Node{anonfield(typs[5])}, nil)
+ typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
+ typs[32] = types.NewArray(typs[0], 32)
+ typs[33] = types.NewPtr(typs[32])
+ typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[38] = types.NewSlice(typs[28])
+ typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
+ typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
+ typs[41] = types.NewArray(typs[0], 4)
+ typs[42] = types.NewPtr(typs[41])
+ typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
+ typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
+ typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
+ typs[46] = types.Runetype
+ typs[47] = types.NewSlice(typs[46])
+ typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
+ typs[49] = types.NewSlice(typs[0])
+ typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
+ typs[51] = types.NewArray(typs[46], 32)
+ typs[52] = types.NewPtr(typs[51])
+ typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
+ typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
+ typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
+ typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
+ typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
+ typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
+ typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
+ typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
+ typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
+ typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
+ typs[63] = types.NewPtr(typs[5])
+ typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
+ typs[65] = types.Types[TUINT32]
+ typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
+ typs[67] = types.NewMap(typs[2], typs[2])
+ typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
+ typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
+ typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
+ typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
+ typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
+ typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
+ typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
+ typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
+ typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
+ typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
+ typs[81] = types.NewChan(typs[2], types.Cboth)
+ typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
+ typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
+ typs[84] = types.NewChan(typs[2], types.Crecv)
+ typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
+ typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[87] = types.NewChan(typs[2], types.Csend)
+ typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
+ typs[89] = types.NewArray(typs[0], 3)
+ typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
+ typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
+ typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
+ typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
+ typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
+ typs[96] = types.NewPtr(typs[6])
+ typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
+ typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
+ typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
+ typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
+ typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
+ typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
+ typs[103] = types.NewSlice(typs[2])
+ typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
+ typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
+ typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
+ typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
+ typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
+ typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
+ typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
+ typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
+ typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
+ typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
+ typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
+ typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
+ typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
+ typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
+ typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
+ typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[123] = types.NewSlice(typs[7])
@@ -331,7 +332,7 @@ func runtimeTypes() []*types.Type {
typs[126] = functype(nil, []*Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[TUINT16]
typs[128] = functype(nil, []*Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
- typs[129] = functype(nil, []*Node{anonfield(typs[64]), anonfield(typs[64])}, nil)
+ typs[129] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[130] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index 02d6c7b7f5..aac2de38c6 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -54,6 +54,7 @@ func printuint(uint64)
func printcomplex(complex128)
func printstring(string)
func printpointer(any)
+func printuintptr(uintptr)
func printiface(any)
func printeface(any)
func printslice(any)
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index 250be38e5b..902d2e34a3 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -198,7 +198,7 @@ func capturevars(xfunc *Node) {
outer = nod(OADDR, outer, nil)
}
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
var name *types.Sym
if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
name = v.Name.Curfn.Func.Nname.Sym
@@ -434,6 +434,8 @@ func typecheckpartialcall(fn *Node, sym *types.Sym) {
fn.Type = xfunc.Type
}
+// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
+// for partial calls.
func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
rcvrtype := fn.Left.Type
sym := methodSymSuffix(rcvrtype, meth, "-fm")
@@ -500,6 +502,10 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
funcbody()
xfunc = typecheck(xfunc, ctxStmt)
+ // Need to typecheck the body of the just-generated wrapper.
+ // typecheckslice() requires that Curfn is set when processing an ORETURN.
+ Curfn = xfunc
+ typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
sym.Def = asTypesNode(xfunc)
xtop = append(xtop, xfunc)
Curfn = savecurfn
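
Note: the -fm wrappers in question come from method values. A minimal program that forces makepartialcall to build, and now also typecheck, such a wrapper:

    package main

    import "fmt"

    type T struct{ x int }

    func (t T) M() int { return t.x }

    func main() {
    	f := T{x: 41}.M // method value: the compiler synthesizes a T.M-fm wrapper
    	fmt.Println(f() + 1) // prints 42
    }
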
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index c0ed8192d9..b92c8d66b5 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -114,16 +114,16 @@ func (v Val) Interface() interface{} {
type NilVal struct{}
-// Int64 returns n as an int64.
+// Int64Val returns n as an int64.
// n must be an integer or rune constant.
-func (n *Node) Int64() int64 {
+func (n *Node) Int64Val() int64 {
if !Isconst(n, CTINT) {
- Fatalf("Int64(%v)", n)
+ Fatalf("Int64Val(%v)", n)
}
return n.Val().U.(*Mpint).Int64()
}
-// CanInt64 reports whether it is safe to call Int64() on n.
+// CanInt64 reports whether it is safe to call Int64Val() on n.
func (n *Node) CanInt64() bool {
if !Isconst(n, CTINT) {
return false
@@ -131,18 +131,27 @@ func (n *Node) CanInt64() bool {
// if the value inside n cannot be represented as an int64, the
// return value of Int64 is undefined
- return n.Val().U.(*Mpint).CmpInt64(n.Int64()) == 0
+ return n.Val().U.(*Mpint).CmpInt64(n.Int64Val()) == 0
}
-// Bool returns n as a bool.
+// BoolVal returns n as a bool.
// n must be a boolean constant.
-func (n *Node) Bool() bool {
+func (n *Node) BoolVal() bool {
if !Isconst(n, CTBOOL) {
- Fatalf("Bool(%v)", n)
+ Fatalf("BoolVal(%v)", n)
}
return n.Val().U.(bool)
}
+// StringVal returns the value of a literal string Node as a string.
+// n must be a string constant.
+func (n *Node) StringVal() string {
+ if !Isconst(n, CTSTR) {
+ Fatalf("StringVal(%v)", n)
+ }
+ return n.Val().U.(string)
+}
+
// truncate float literal fv to 32-bit or 64-bit precision
// according to type; return truncated value.
func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt {
@@ -612,7 +621,7 @@ func evconst(n *Node) {
var strs []string
i2 := i1
for i2 < len(s) && Isconst(s[i2], CTSTR) {
- strs = append(strs, strlit(s[i2]))
+ strs = append(strs, s[i2].StringVal())
i2++
}
@@ -635,7 +644,7 @@ func evconst(n *Node) {
switch nl.Type.Etype {
case TSTRING:
if Isconst(nl, CTSTR) {
- setintconst(n, int64(len(strlit(nl))))
+ setintconst(n, int64(len(nl.StringVal())))
}
case TARRAY:
if !hascallchan(nl) {
@@ -1019,17 +1028,17 @@ func nodlit(v Val) *Node {
func idealType(ct Ctype) *types.Type {
switch ct {
case CTSTR:
- return types.Idealstring
+ return types.UntypedString
case CTBOOL:
- return types.Idealbool
+ return types.UntypedBool
case CTINT:
- return types.Idealint
+ return types.UntypedInt
case CTRUNE:
- return types.Idealrune
+ return types.UntypedRune
case CTFLT:
- return types.Idealfloat
+ return types.UntypedFloat
case CTCPLX:
- return types.Idealcomplex
+ return types.UntypedComplex
case CTNIL:
return types.Types[TNIL]
}
@@ -1080,17 +1089,17 @@ func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
func ctype(t *types.Type) Ctype {
switch t {
- case types.Idealbool:
+ case types.UntypedBool:
return CTBOOL
- case types.Idealstring:
+ case types.UntypedString:
return CTSTR
- case types.Idealint:
+ case types.UntypedInt:
return CTINT
- case types.Idealrune:
+ case types.UntypedRune:
return CTRUNE
- case types.Idealfloat:
+ case types.UntypedFloat:
return CTFLT
- case types.Idealcomplex:
+ case types.UntypedComplex:
return CTCPLX
}
Fatalf("bad type %v", t)
@@ -1111,17 +1120,17 @@ func defaultType(t *types.Type) *types.Type {
}
switch t {
- case types.Idealbool:
+ case types.UntypedBool:
return types.Types[TBOOL]
- case types.Idealstring:
+ case types.UntypedString:
return types.Types[TSTRING]
- case types.Idealint:
+ case types.UntypedInt:
return types.Types[TINT]
- case types.Idealrune:
+ case types.UntypedRune:
return types.Runetype
- case types.Idealfloat:
+ case types.UntypedFloat:
return types.Types[TFLOAT64]
- case types.Idealcomplex:
+ case types.UntypedComplex:
return types.Types[TCOMPLEX128]
}
@@ -1129,12 +1138,6 @@ func defaultType(t *types.Type) *types.Type {
return nil
}
-// strlit returns the value of a literal string Node as a string.
-func strlit(n *Node) string {
- return n.Val().U.(string)
-}
-
-// TODO(gri) smallintconst is only used in one place - can we used indexconst?
func smallintconst(n *Node) bool {
if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
switch simtype[n.Type.Etype] {
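
Note: the Ideal* to Untyped* renames track the spec's terminology for untyped constants. For reference, the defaults picked by defaultType correspond to ordinary constant defaulting in declarations:

    // Default types chosen for untyped constants, matching defaultType above:
    var (
    	b = true // UntypedBool    -> bool
    	i = 42   // UntypedInt     -> int
    	r = 'x'  // UntypedRune    -> rune
    	f = 1.5  // UntypedFloat   -> float64
    	c = 2i   // UntypedComplex -> complex128
    	s = "go" // UntypedString  -> string
    )
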
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index a362d1a643..b8ca0d2e03 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -283,7 +283,7 @@ func oldname(s *types.Sym) *Node {
c.Name.Defn = n
// Link into list of active closure variables.
- // Popped from list in func closurebody.
+ // Popped from list in func funcLit.
c.Name.Param.Outer = n.Name.Param.Innermost
n.Name.Param.Innermost = c
diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go
index 27e2cbcd98..5120fa1166 100644
--- a/src/cmd/compile/internal/gc/dwinl.go
+++ b/src/cmd/compile/internal/gc/dwinl.go
@@ -34,7 +34,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// Walk progs to build up the InlCalls data structure
var prevpos src.XPos
- for p := fnsym.Func.Text; p != nil; p = p.Link {
+ for p := fnsym.Func().Text; p != nil; p = p.Link {
if p.Pos == prevpos {
continue
}
@@ -150,7 +150,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
start := int64(-1)
curii := -1
var prevp *obj.Prog
- for p := fnsym.Func.Text; p != nil; prevp, p = p, p.Link {
+ for p := fnsym.Func().Text; p != nil; prevp, p = p, p.Link {
if prevp != nil && p.Pos == prevp.Pos {
continue
}
diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go
new file mode 100644
index 0000000000..103949c1f9
--- /dev/null
+++ b/src/cmd/compile/internal/gc/embed.go
@@ -0,0 +1,273 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "encoding/json"
+ "io/ioutil"
+ "log"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var embedlist []*Node
+
+var embedCfg struct {
+ Patterns map[string][]string
+ Files map[string]string
+}
+
+func readEmbedCfg(file string) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-embedcfg: %v", err)
+ }
+ if err := json.Unmarshal(data, &embedCfg); err != nil {
+ log.Fatalf("%s: %v", file, err)
+ }
+ if embedCfg.Patterns == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+ }
+ if embedCfg.Files == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Files", file)
+ }
+}
+
+const (
+ embedUnknown = iota
+ embedBytes
+ embedString
+ embedFiles
+)
+
+var numLocalEmbed int
+
+func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) (newExprs []*Node) {
+ haveEmbed := false
+ for _, decl := range p.file.DeclList {
+ imp, ok := decl.(*syntax.ImportDecl)
+ if !ok {
+ // imports always come first
+ break
+ }
+ path, _ := strconv.Unquote(imp.Path.Value)
+ if path == "embed" {
+ haveEmbed = true
+ break
+ }
+ }
+
+ pos := embeds[0].Pos
+ if !haveEmbed {
+ p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
+ return exprs
+ }
+ if embedCfg.Patterns == nil {
+ p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration")
+ return exprs
+ }
+ if len(names) > 1 {
+ p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
+ return exprs
+ }
+ if len(exprs) > 0 {
+ p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
+ return exprs
+ }
+ if typ == nil {
+ // Should not happen, since len(exprs) == 0 now.
+ p.yyerrorpos(pos, "go:embed cannot apply to var without type")
+ return exprs
+ }
+
+ kind := embedKindApprox(typ)
+ if kind == embedUnknown {
+ p.yyerrorpos(pos, "go:embed cannot apply to var of type %v", typ)
+ return exprs
+ }
+
+ // Build list of files to store.
+ have := make(map[string]bool)
+ var list []string
+ for _, e := range embeds {
+ for _, pattern := range e.Patterns {
+ files, ok := embedCfg.Patterns[pattern]
+ if !ok {
+ p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
+ }
+ for _, file := range files {
+ if embedCfg.Files[file] == "" {
+ p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file)
+ continue
+ }
+ if !have[file] {
+ have[file] = true
+ list = append(list, file)
+ }
+ if kind == embedFiles {
+ for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
+ have[dir] = true
+ list = append(list, dir+"/")
+ }
+ }
+ }
+ }
+ }
+ sort.Slice(list, func(i, j int) bool {
+ return embedFileLess(list[i], list[j])
+ })
+
+ if kind == embedString || kind == embedBytes {
+ if len(list) > 1 {
+ p.yyerrorpos(pos, "invalid go:embed: multiple files for type %v", typ)
+ return exprs
+ }
+ }
+
+ v := names[0]
+ if dclcontext != PEXTERN {
+ numLocalEmbed++
+ v = newnamel(v.Pos, lookupN("embed.", numLocalEmbed))
+ v.Sym.Def = asTypesNode(v)
+ v.Name.Param.Ntype = typ
+ v.SetClass(PEXTERN)
+ externdcl = append(externdcl, v)
+ exprs = []*Node{v}
+ }
+
+ v.Name.Param.SetEmbedFiles(list)
+ embedlist = append(embedlist, v)
+ return exprs
+}
+
+// embedKindApprox determines the kind of embedding variable, approximately.
+// The match is approximate because we haven't done scope resolution yet and
+// can't tell whether "string" and "byte" really mean "string" and "byte".
+// The result must be confirmed later, after type checking, using embedKind.
+func embedKindApprox(typ *Node) int {
+ if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+ return embedFiles
+ }
+ // These are not guaranteed to match only string and []byte -
+ // maybe the local package has redefined one of those words.
+ // But it's the best we can do now during the noder.
+ // The stricter check happens later, in initEmbed calling embedKind.
+ if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == localpkg {
+ return embedString
+ }
+ if typ.Op == OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == localpkg {
+ return embedBytes
+ }
+ return embedUnknown
+}
+
+// embedKind determines the kind of embedding variable.
+func embedKind(typ *types.Type) int {
+ if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+ return embedFiles
+ }
+ if typ == types.Types[TSTRING] {
+ return embedString
+ }
+ if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype {
+ return embedBytes
+ }
+ return embedUnknown
+}
+
+func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
+ if name[len(name)-1] == '/' {
+ isDir = true
+ name = name[:len(name)-1]
+ }
+ i := len(name) - 1
+ for i >= 0 && name[i] != '/' {
+ i--
+ }
+ if i < 0 {
+ return ".", name, isDir
+ }
+ return name[:i], name[i+1:], isDir
+}
+
+// embedFileLess implements the sort order for a list of embedded files.
+// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
+func embedFileLess(x, y string) bool {
+ xdir, xelem, _ := embedFileNameSplit(x)
+ ydir, yelem, _ := embedFileNameSplit(y)
+ return xdir < ydir || xdir == ydir && xelem < yelem
+}
+
+func dumpembeds() {
+ for _, v := range embedlist {
+ initEmbed(v)
+ }
+}
+
+// initEmbed emits the init data for a //go:embed variable,
+// which is either a string, a []byte, or an embed.FS.
+func initEmbed(v *Node) {
+ files := v.Name.Param.EmbedFiles()
+ switch kind := embedKind(v.Type); kind {
+ case embedUnknown:
+ yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
+
+ case embedString, embedBytes:
+ file := files[0]
+ fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
+ if err != nil {
+ yyerrorl(v.Pos, "embed %s: %v", file, err)
+ }
+ sym := v.Sym.Linksym()
+ off := 0
+ off = dsymptr(sym, off, fsym, 0) // data string
+ off = duintptr(sym, off, uint64(size)) // len
+ if kind == embedBytes {
+ duintptr(sym, off, uint64(size)) // cap for slice
+ }
+
+ case embedFiles:
+ slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
+ off := 0
+ // []files pointed at by Files
+ off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
+ off = duintptr(slicedata, off, uint64(len(files)))
+ off = duintptr(slicedata, off, uint64(len(files)))
+
+ // embed/embed.go type file is:
+ // name string
+ // data string
+ // hash [16]byte
+ // Emit one of these per file in the set.
+ const hashSize = 16
+ hash := make([]byte, hashSize)
+ for _, file := range files {
+ off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
+ off = duintptr(slicedata, off, uint64(len(file)))
+ if strings.HasSuffix(file, "/") {
+ // entry for directory - no data
+ off = duintptr(slicedata, off, 0)
+ off = duintptr(slicedata, off, 0)
+ off += hashSize
+ } else {
+ fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
+ if err != nil {
+ yyerrorl(v.Pos, "embed %s: %v", file, err)
+ }
+ off = dsymptr(slicedata, off, fsym, 0) // data string
+ off = duintptr(slicedata, off, uint64(size))
+ off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
+ }
+ }
+ ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+ sym := v.Sym.Linksym()
+ dsymptr(sym, 0, slicedata, 0)
+ }
+}
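
A minimal sketch of the JSON shape readEmbedCfg expects from the -embedcfg
file; the pattern and file names here are hypothetical, not taken from the patch:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// embedCfg mirrors the struct readEmbedCfg unmarshals into.
	type embedCfg struct {
		Patterns map[string][]string // glob pattern -> files it matched
		Files    map[string]string   // file name -> path on disk
	}

	func main() {
		cfg := embedCfg{
			Patterns: map[string][]string{"static/*": {"static/index.html"}},
			Files:    map[string]string{"static/index.html": "/tmp/build/static/index.html"},
		}
		out, _ := json.MarshalIndent(cfg, "", "\t")
		fmt.Println(string(out))
	}
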
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index 375331d1f5..6f328ab5ea 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -169,36 +169,47 @@ func mayAffectMemory(n *Node) bool {
}
}
-func mustHeapAlloc(n *Node) bool {
+// heapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it does not need to be.
+func heapAllocReason(n *Node) string {
if n.Type == nil {
- return false
+ return ""
}
// Parameters are always passed via the stack.
if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
- return false
+ return ""
}
if n.Type.Width > maxStackVarSize {
- return true
+ return "too large for stack"
}
if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
- return true
+ return "too large for stack"
}
if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
- return true
+ return "too large for stack"
}
if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
- return true
+ return "too large for stack"
}
- if n.Op == OMAKESLICE && !isSmallMakeSlice(n) {
- return true
+ if n.Op == OMAKESLICE {
+ r := n.Right
+ if r == nil {
+ r = n.Left
+ }
+ if !smallintconst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
+ return "too large for stack"
+ }
}
- return false
+ return ""
}
// addrescapes tags node n as having had its address taken
@@ -271,7 +282,7 @@ func addrescapes(n *Node) {
// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
Dump("MOVE", n)
}
if compiling_runtime {
@@ -348,7 +359,7 @@ func moveToHeap(n *Node) {
n.Xoffset = 0
n.Name.Param.Heapaddr = heapaddr
n.Esc = EscHeap
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(n.Pos, "moved to heap: %v", n)
}
}
@@ -378,7 +389,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
if f.Type.IsUintptr() {
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
}
return unsafeUintptrTag
@@ -393,11 +404,11 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
if fn.Func.Pragma&Noescape != 0 {
- if Debug['m'] != 0 && f.Sym != nil {
+ if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "%v does not escape", name())
}
} else {
- if Debug['m'] != 0 && f.Sym != nil {
+ if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "leaking param: %v", name())
}
esc.AddHeap(0)
@@ -408,14 +419,14 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
if fn.Func.Pragma&UintptrEscapes != 0 {
if f.Type.IsUintptr() {
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping uintptr", name())
}
return uintptrEscapesTag
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
}
return uintptrEscapesTag
@@ -437,7 +448,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
esc := loc.paramEsc
esc.Optimize()
- if Debug['m'] != 0 && !loc.escapes {
+ if Debug.m != 0 && !loc.escapes {
if esc.Empty() {
Warnl(f.Pos, "%v does not escape", name())
}
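
A sketch of the make([]T, n) cases the new heapAllocReason distinguishes;
building a file like this with -gcflags=-m shows the matching diagnostics
(the exact size thresholds are internal compiler constants):

	package p

	func f() {
		s1 := make([]byte, 64)    // small constant size: may stay on the stack
		n := 64
		s2 := make([]byte, n)     // "non-constant size": forced to the heap
		s3 := make([]byte, 1<<20) // "too large for stack": forced to the heap
		_, _, _ = s1, s2, s3
	}
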
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index d79d32ec48..618bdf78e2 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -170,7 +170,7 @@ func (e *Escape) initFunc(fn *Node) {
Fatalf("unexpected node: %v", fn)
}
fn.Esc = EscFuncPlanned
- if Debug['m'] > 3 {
+ if Debug.m > 3 {
Dump("escAnalyze", fn)
}
@@ -247,7 +247,7 @@ func (e *Escape) stmt(n *Node) {
lineno = lno
}()
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
}
@@ -275,11 +275,11 @@ func (e *Escape) stmt(n *Node) {
case OLABEL:
switch asNode(n.Sym.Label) {
case &nonlooping:
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
}
case &looping:
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
}
e.loopDepth++
@@ -717,7 +717,7 @@ func (e *Escape) addrs(l Nodes) []EscHole {
func (e *Escape) assign(dst, src *Node, why string, where *Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
- if ignore && Debug['m'] != 0 {
+ if ignore && Debug.m != 0 {
Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
}
@@ -771,10 +771,11 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
var fn *Node
switch call.Op {
case OCALLFUNC:
- if call.Left.Op == ONAME && call.Left.Class() == PFUNC {
- fn = call.Left
- } else if call.Left.Op == OCLOSURE {
- fn = call.Left.Func.Closure.Func.Nname
+ switch v := staticValue(call.Left); {
+ case v.Op == ONAME && v.Class() == PFUNC:
+ fn = v
+ case v.Op == OCLOSURE:
+ fn = v.Func.Closure.Func.Nname
}
case OCALLMETH:
fn = asNode(call.Left.Type.FuncType().Nname)
@@ -930,7 +931,7 @@ func (k EscHole) note(where *Node, why string) EscHole {
if where == nil || why == "" {
Fatalf("note: missing where/why")
}
- if Debug['m'] >= 2 || logopt.Enabled() {
+ if Debug.m >= 2 || logopt.Enabled() {
k.notes = &EscNote{
next: k.notes,
where: where,
@@ -1051,11 +1052,7 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
}
n.SetOpt(loc)
- if mustHeapAlloc(n) {
- why := "too large for stack"
- if n.Op == OMAKESLICE && (!Isconst(n.Left, CTINT) || (n.Right != nil && !Isconst(n.Right, CTINT))) {
- why = "non-constant size"
- }
+ if why := heapAllocReason(n); why != "" {
e.flow(e.heapHole().addr(n, why), loc)
}
}
@@ -1080,9 +1077,9 @@ func (e *Escape) flow(k EscHole, src *EscLocation) {
return
}
if dst.escapes && k.derefs < 0 { // dst = &src
- if Debug['m'] >= 2 || logopt.Enabled() {
+ if Debug.m >= 2 || logopt.Enabled() {
pos := linestr(src.n.Pos)
- if Debug['m'] >= 2 {
+ if Debug.m >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
@@ -1182,8 +1179,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// that value flow for tagging the function
// later.
if l.isName(PPARAM) {
- if (logopt.Enabled() || Debug['m'] >= 2) && !l.escapes {
- if Debug['m'] >= 2 {
+ if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
+ if Debug.m >= 2 {
fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
}
explanation := e.explainPath(root, l)
@@ -1199,8 +1196,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// outlives it, then l needs to be heap
// allocated.
if addressOf && !l.escapes {
- if logopt.Enabled() || Debug['m'] >= 2 {
- if Debug['m'] >= 2 {
+ if logopt.Enabled() || Debug.m >= 2 {
+ if Debug.m >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
}
explanation := e.explainPath(root, l)
@@ -1238,7 +1235,7 @@ func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
for {
// Prevent infinite loop.
if visited[src] {
- if Debug['m'] >= 2 {
+ if Debug.m >= 2 {
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
}
break
@@ -1266,7 +1263,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
}
- print := Debug['m'] >= 2
+ print := Debug.m >= 2
flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
if print {
@@ -1420,7 +1417,7 @@ func (e *Escape) finish(fns []*Node) {
if loc.escapes {
if n.Op != ONAME {
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(n.Pos, "%S escapes to heap", n)
}
if logopt.Enabled() {
@@ -1430,7 +1427,7 @@ func (e *Escape) finish(fns []*Node) {
n.Esc = EscHeap
addrescapes(n)
} else {
- if Debug['m'] != 0 && n.Op != ONAME {
+ if Debug.m != 0 && n.Op != ONAME {
Warnl(n.Pos, "%S does not escape", n)
}
n.Esc = EscNone
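
What the staticValue change buys escape analysis, in sketch form: a call
through a local that is assigned once and never reassigned now resolves to
the underlying closure, so the closure's parameter tags can apply:

	package p

	func caller() int {
		f := func(p *int) int { return *p } // assigned once; address not taken
		x := 42
		return f(&x) // staticValue resolves f to the closure; x can stay on the stack
	}
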
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 44bea2b1fd..c6917e0f81 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -31,7 +31,7 @@ func exportsym(n *Node) {
}
n.Sym.SetOnExportList(true)
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("export symbol %v\n", n.Sym)
}
@@ -96,7 +96,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
return n
}
-// pkgtype returns the named type declared by symbol s.
+// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
@@ -150,7 +150,7 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val
n.SetVal(val)
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("import const %v %L = %v\n", s, t, val)
}
}
@@ -166,7 +166,7 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n.Func = new(Func)
t.SetNname(asTypesNode(n))
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
}
@@ -179,7 +179,7 @@ func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
return
}
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("import var %v %L\n", s, t)
}
}
@@ -192,7 +192,7 @@ func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
return
}
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("import type %v = %L\n", s, t)
}
}
diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
index 6ae363be22..c619d25705 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -6,17 +6,9 @@ package gc
import (
"math"
- "os"
- "runtime"
"testing"
)
-// For GO386=387, make sure fucomi* opcodes are not used
-// for comparison operations.
-// Note that this test will fail only on a Pentium MMX
-// processor (with GOARCH=386 GO386=387), as it just runs
-// some code and looks for an unimplemented instruction fault.
-
//go:noinline
func compare1(a, b float64) bool {
return a < b
@@ -137,9 +129,6 @@ func TestFloatCompareFolded(t *testing.T) {
}
}
-// For GO386=387, make sure fucomi* opcodes are not used
-// for float->int conversions.
-
//go:noinline
func cvt1(a float64) uint64 {
return uint64(a)
@@ -370,14 +359,6 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) {
// are not converted to quiet NaN (qNaN) values during compilation.
// See issue #27193 for more information.
- // TODO: this method for detecting 387 won't work if the compiler has been
- // built using GOARCH=386 GO386=387 and either the target is a different
- // architecture or the GO386=387 environment variable is not set when the
- // test is run.
- if runtime.GOARCH == "386" && os.Getenv("GO386") == "387" {
- t.Skip("signaling NaNs are not propagated on 387 (issue #27516)")
- }
-
// signaling NaNs
{
const nan = uint32(0x7f800001) // sNaN
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index d4af451506..d7ed1d2ff0 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -773,17 +773,17 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
var name string
switch t {
- case types.Idealbool:
+ case types.UntypedBool:
name = "untyped bool"
- case types.Idealstring:
+ case types.UntypedString:
name = "untyped string"
- case types.Idealint:
+ case types.UntypedInt:
name = "untyped int"
- case types.Idealrune:
+ case types.UntypedRune:
name = "untyped rune"
- case types.Idealfloat:
+ case types.UntypedFloat:
name = "untyped float"
- case types.Idealcomplex:
+ case types.UntypedComplex:
name = "untyped complex"
default:
name = basicnames[t.Etype]
@@ -792,6 +792,13 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
return
}
+ if mode == FDbg {
+ b.WriteString(t.Etype.String())
+ b.WriteByte('-')
+ tconv2(b, t, flag, FErr, visited)
+ return
+ }
+
// At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
// try to print it recursively.
// We record the offset in the result buffer where the type's text starts. This offset serves as a reference
@@ -805,12 +812,6 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
visited[t] = b.Len()
defer delete(visited, t)
- if mode == FDbg {
- b.WriteString(t.Etype.String())
- b.WriteByte('-')
- tconv2(b, t, flag, FErr, visited)
- return
- }
switch t.Etype {
case TPTR:
b.WriteByte('*')
@@ -1333,7 +1334,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
n.Orig.exprfmt(s, prec, mode)
return
}
- if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != types.Idealbool && n.Type != types.Idealstring {
+ if n.Type != nil && !n.Type.IsUntyped() {
// Need parens when type begins with what might
// be misinterpreted as a unary operator: * or <-.
if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) {
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index 9079ce2afc..da6b6d6e72 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -61,12 +61,12 @@ type Class uint8
//go:generate stringer -type=Class
const (
Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
- PEXTERN // global variable
+ PEXTERN // global variables
PAUTO // local variables
- PAUTOHEAP // local variable or parameter moved to heap
+ PAUTOHEAP // local variables or parameters moved to heap
PPARAM // input arguments
PPARAMOUT // output results
- PFUNC // global function
+ PFUNC // global functions
// Careful: Class is stored in three bits in Node.flags.
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
@@ -116,7 +116,15 @@ var decldepth int32
var nolocalimports bool
-var Debug [256]int
+// DebugFlags holds the gc debug flags, replacing the old Debug [256]int
+// array; each field counts how many times its single-letter flag was set.
+type DebugFlags struct {
+ P, B, C, E,
+ K, L, N, S,
+ W, e, h, j,
+ l, m, r, w int
+}
+
+var Debug DebugFlags
var debugstr string
@@ -259,7 +267,6 @@ type Arch struct {
REGSP int
MAXWIDTH int64
- Use387 bool // should 386 backend use 387 FP instructions instead of sse2.
SoftFloat bool
PadFrame func(int64) int64
@@ -328,10 +335,6 @@ var (
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
- // GO386=387
- ControlWord64trunc,
- ControlWord32 *obj.LSym
-
// Wasm
WasmMove,
WasmZero,
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index 480d411f49..ce5182f203 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -153,7 +153,7 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog {
pp.clearp(pp.next)
p.Link = pp.next
- if !pp.pos.IsKnown() && Debug['K'] != 0 {
+ if !pp.pos.IsKnown() && Debug.K != 0 {
Warn("prog: unknown position (line 0)")
}
@@ -199,7 +199,7 @@ func (pp *Progs) settext(fn *Node) {
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
- fn.Func.lsym.Func.Text = ptxt
+ fn.Func.lsym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
ptxt.From.Sym = fn.Func.lsym
diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go
index b3f50b63af..9bc1f64600 100644
--- a/src/cmd/compile/internal/gc/iexport.go
+++ b/src/cmd/compile/internal/gc/iexport.go
@@ -751,11 +751,11 @@ func (w *exportWriter) param(f *types.Field) {
func constTypeOf(typ *types.Type) Ctype {
switch typ {
- case types.Idealint, types.Idealrune:
+ case types.UntypedInt, types.UntypedRune:
return CTINT
- case types.Idealfloat:
+ case types.UntypedFloat:
return CTFLT
- case types.Idealcomplex:
+ case types.UntypedComplex:
return CTCPLX
}
@@ -780,8 +780,8 @@ func constTypeOf(typ *types.Type) Ctype {
}
func (w *exportWriter) value(typ *types.Type, v Val) {
- if typ.IsUntyped() {
- typ = untype(v.Ctype())
+ if vt := idealType(v.Ctype()); typ.IsUntyped() && typ != vt {
+ Fatalf("exporter: untyped type mismatch, have: %v, want: %v", typ, vt)
}
w.typ(typ)
@@ -1017,6 +1017,8 @@ func (w *exportWriter) symIdx(s *types.Sym) {
}
func (w *exportWriter) typeExt(t *types.Type) {
+ // Export whether this type is marked notinheap.
+ w.bool(t.NotInHeap())
// For type T, export the index of type descriptor symbols of T and *T.
if i, ok := typeSymIdx[t]; ok {
w.int64(i[0])
@@ -1264,8 +1266,13 @@ func (w *exportWriter) expr(n *Node) {
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- // case OCALLPART:
- // unimplemented - handled by default case
+ case OCALLPART:
+ // An OCALLPART is an OXDOT before type checking.
+ w.op(OXDOT)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ // Right node should be ONAME
+ w.selector(n.Right.Sym)
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
w.op(OXDOT)
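
The newly exported OCALLPART node is the compiler's representation of a
method value; a standalone illustration of the construct, unrelated to the
compiler's own sources:

	package main

	import (
		"bytes"
		"fmt"
	)

	func main() {
		var b bytes.Buffer
		read := b.Read           // method value: an OCALLPART inside the compiler
		fmt.Printf("%T\n", read) // func([]uint8) (int, error)
	}
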
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go
index 4169222c14..7f2b05f288 100644
--- a/src/cmd/compile/internal/gc/iimport.go
+++ b/src/cmd/compile/internal/gc/iimport.go
@@ -375,7 +375,7 @@ func (p *importReader) value() (typ *types.Type, v Val) {
v.U = p.string()
case CTINT:
x := new(Mpint)
- x.Rune = typ == types.Idealrune
+ x.Rune = typ == types.UntypedRune
p.mpint(&x.Val, typ)
v.U = x
case CTFLT:
@@ -596,7 +596,6 @@ func (r *importReader) typ1() *types.Type {
// Ensure we expand the interface in the frontend (#25055).
checkwidth(t)
-
return t
}
}
@@ -711,6 +710,7 @@ func (r *importReader) symIdx(s *types.Sym) {
}
func (r *importReader) typeExt(t *types.Type) {
+ t.SetNotInHeap(r.bool())
i, pi := r.int64(), r.int64()
if i != -1 && pi != -1 {
typeSymIdx[t] = [2]int64{i, pi}
@@ -742,8 +742,8 @@ func (r *importReader) doInline(n *Node) {
importlist = append(importlist, n)
- if Debug['E'] > 0 && Debug['m'] > 2 {
- if Debug['m'] > 3 {
+ if Debug.E > 0 && Debug.m > 2 {
+ if Debug.m > 3 {
fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body))
} else {
fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body))
@@ -866,7 +866,7 @@ func (r *importReader) node() *Node {
// unreachable - handled in case OSTRUCTLIT by elemList
// case OCALLPART:
- // unimplemented
+ // unreachable - mapped to case OXDOT below by exporter
// case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
// unreachable - mapped to case OXDOT below by exporter
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index fa5b3ec698..a2fb00e132 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -7,7 +7,7 @@
// saves a copy of the body. Then inlcalls walks each function body to
// expand calls to inlinable functions.
//
-// The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1,
+// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
// making 1 the default and -l disable inlining. Additional levels (beyond -l) may be buggy and
// are not supported.
// 0: disabled
@@ -21,7 +21,7 @@
// The -d typcheckinl flag enables early typechecking of all imported bodies,
// which is useful to flush out bugs.
//
-// The debug['m'] flag enables diagnostic output. a single -m is useful for verifying
+// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
// which calls get inlined or not, more is for debugging, and may go away at any point.
package gc
@@ -85,7 +85,7 @@ func typecheckinl(fn *Node) {
return // typecheckinl on local function
}
- if Debug['m'] > 2 || Debug_export != 0 {
+ if Debug.m > 2 || Debug_export != 0 {
fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
}
@@ -116,10 +116,10 @@ func caninl(fn *Node) {
}
var reason string // reason, if any, that the function was not inlined
- if Debug['m'] > 1 || logopt.Enabled() {
+ if Debug.m > 1 || logopt.Enabled() {
defer func() {
if reason != "" {
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
}
if logopt.Enabled() {
@@ -187,7 +187,7 @@ func caninl(fn *Node) {
defer n.Func.SetInlinabilityChecked(true)
cc := int32(inlineExtraCallCost)
- if Debug['l'] == 4 {
+ if Debug.l == 4 {
cc = 1 // this appears to yield better performance than 0.
}
@@ -224,9 +224,9 @@ func caninl(fn *Node) {
// this is so export can find the body of a method
fn.Type.FuncType().Nname = asTypesNode(n)
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body))
- } else if Debug['m'] != 0 {
+ } else if Debug.m != 0 {
fmt.Printf("%v: can inline %v\n", fn.Line(), n)
}
if logopt.Enabled() {
@@ -325,18 +325,10 @@ func (v *hairyVisitor) visit(n *Node) bool {
break
}
- if fn := n.Left.Func; fn != nil && fn.Inl != nil {
- v.budget -= fn.Inl.Cost
+ if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
+ v.budget -= fn.Func.Inl.Cost
break
}
- if n.Left.isMethodExpression() {
- if d := asNode(n.Left.Sym.Def); d != nil && d.Func.Inl != nil {
- v.budget -= d.Func.Inl.Cost
- break
- }
- }
- // TODO(mdempsky): Budget for OCLOSURE calls if we
- // ever allow that. See #15561 and #23093.
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
@@ -382,17 +374,16 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.reason = "call to recover"
return true
+ case OCALLPART:
+ // OCALLPART is inlinable, but adds no extra cost to the budget
+
case OCLOSURE,
- OCALLPART,
ORANGE,
- OFOR,
- OFORUNTIL,
OSELECT,
OTYPESW,
OGO,
ODEFER,
ODCLTYPE, // can't print yet
- OBREAK,
ORETJMP:
v.reason = "unhandled op " + n.Op.String()
return true
@@ -400,10 +391,23 @@ func (v *hairyVisitor) visit(n *Node) bool {
case OAPPEND:
v.budget -= inlineExtraAppendCost
- case ODCLCONST, OEMPTY, OFALL, OLABEL:
+ case ODCLCONST, OEMPTY, OFALL:
// These nodes don't produce code; omit from inlining budget.
return false
+ case OLABEL:
+ // TODO(mdempsky): Add support for inlining labeled control statements.
+ if n.labeledControl() != nil {
+ v.reason = "labeled control"
+ return true
+ }
+
+ case OBREAK, OCONTINUE:
+ if n.Sym != nil {
+ // Should have short-circuited due to labeledControl above.
+ Fatalf("unexpected labeled break/continue: %v", n)
+ }
+
case OIF:
if Isconst(n.Left, CTBOOL) {
// This if and the condition cost nothing.
@@ -421,7 +425,7 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.budget--
// When debugging, don't stop early, to get full cost of inlining this function
- if v.budget < 0 && Debug['m'] < 2 && !logopt.Enabled() {
+ if v.budget < 0 && Debug.m < 2 && !logopt.Enabled() {
return true
}
@@ -452,7 +456,7 @@ func inlcopy(n *Node) *Node {
}
m := n.copy()
- if m.Func != nil {
+ if n.Op != OCALLPART && m.Func != nil {
Fatalf("unexpected Func: %v", m)
}
m.Left = inlcopy(n.Left)
@@ -666,60 +670,18 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
switch n.Op {
case OCALLFUNC:
- if Debug['m'] > 3 {
+ if Debug.m > 3 {
fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
}
- if n.Left.Func != nil && n.Left.Func.Inl != nil && !isIntrinsicCall(n) { // normal case
- n = mkinlcall(n, n.Left, maxCost, inlMap)
- } else if n.Left.isMethodExpression() && asNode(n.Left.Sym.Def) != nil {
- n = mkinlcall(n, asNode(n.Left.Sym.Def), maxCost, inlMap)
- } else if n.Left.Op == OCLOSURE {
- if f := inlinableClosure(n.Left); f != nil {
- n = mkinlcall(n, f, maxCost, inlMap)
- }
- } else if n.Left.Op == ONAME && n.Left.Name != nil && n.Left.Name.Defn != nil {
- if d := n.Left.Name.Defn; d.Op == OAS && d.Right.Op == OCLOSURE {
- if f := inlinableClosure(d.Right); f != nil {
- // NB: this check is necessary to prevent indirect re-assignment of the variable
- // having the address taken after the invocation or only used for reads is actually fine
- // but we have no easy way to distinguish the safe cases
- if d.Left.Name.Addrtaken() {
- if Debug['m'] > 1 {
- fmt.Printf("%v: cannot inline escaping closure variable %v\n", n.Line(), n.Left)
- }
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("%v cannot be inlined (escaping closure variable)", n.Left))
- }
- break
- }
-
- // ensure the variable is never re-assigned
- if unsafe, a := reassigned(n.Left); unsafe {
- if Debug['m'] > 1 {
- if a != nil {
- fmt.Printf("%v: cannot inline re-assigned closure variable at %v: %v\n", n.Line(), a.Line(), a)
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("%v cannot be inlined (re-assigned closure variable)", a))
- }
- } else {
- fmt.Printf("%v: cannot inline global closure variable %v\n", n.Line(), n.Left)
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("%v cannot be inlined (global closure variable)", n.Left))
- }
- }
- }
- break
- }
- n = mkinlcall(n, f, maxCost, inlMap)
- }
- }
+ if isIntrinsicCall(n) {
+ break
+ }
+ if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
+ n = mkinlcall(n, fn, maxCost, inlMap)
}
case OCALLMETH:
- if Debug['m'] > 3 {
+ if Debug.m > 3 {
fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
}
@@ -739,16 +701,73 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
return n
}
-// inlinableClosure takes an OCLOSURE node and follows linkage to the matching ONAME with
-// the inlinable body. Returns nil if the function is not inlinable.
-func inlinableClosure(n *Node) *Node {
- c := n.Func.Closure
- caninl(c)
- f := c.Func.Nname
- if f == nil || f.Func.Inl == nil {
+// inlCallee takes a function-typed expression and returns the underlying function ONAME
+// that it refers to if statically known. Otherwise, it returns nil.
+func inlCallee(fn *Node) *Node {
+ fn = staticValue(fn)
+ switch {
+ case fn.Op == ONAME && fn.Class() == PFUNC:
+ if fn.isMethodExpression() {
+ return asNode(fn.Sym.Def)
+ }
+ return fn
+ case fn.Op == OCLOSURE:
+ c := fn.Func.Closure
+ caninl(c)
+ return c.Func.Nname
+ }
+ return nil
+}
+
+func staticValue(n *Node) *Node {
+ for {
+ n1 := staticValue1(n)
+ if n1 == nil {
+ return n
+ }
+ n = n1
+ }
+}
+
+// staticValue1 implements a simple SSA-like optimization. If n is a local variable
+// that is initialized and never reassigned, staticValue1 returns the initializer
+// expression. Otherwise, it returns nil.
+func staticValue1(n *Node) *Node {
+ if n.Op != ONAME || n.Class() != PAUTO || n.Name.Addrtaken() {
+ return nil
+ }
+
+ defn := n.Name.Defn
+ if defn == nil {
+ return nil
+ }
+
+ var rhs *Node
+FindRHS:
+ switch defn.Op {
+ case OAS:
+ rhs = defn.Right
+ case OAS2:
+ for i, lhs := range defn.List.Slice() {
+ if lhs == n {
+ rhs = defn.Rlist.Index(i)
+ break FindRHS
+ }
+ }
+ Fatalf("%v missing from LHS of %v", n, defn)
+ default:
+ return nil
+ }
+ if rhs == nil {
+ Fatalf("RHS is nil: %v", defn)
+ }
+
+ unsafe, _ := reassigned(n)
+ if unsafe {
return nil
}
- return f
+
+ return rhs
}
// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
@@ -831,16 +850,19 @@ func (v *reassignVisitor) visitList(l Nodes) *Node {
return nil
}
-func tinlvar(t *types.Field, inlvars map[*Node]*Node) *Node {
- if n := asNode(t.Nname); n != nil && !n.isBlank() {
- inlvar := inlvars[n]
- if inlvar == nil {
- Fatalf("missing inlvar for %v\n", n)
- }
- return inlvar
+func inlParam(t *types.Field, as *Node, inlvars map[*Node]*Node) *Node {
+ n := asNode(t.Nname)
+ if n == nil || n.isBlank() {
+ return nblank
}
- return typecheck(nblank, ctxExpr|ctxAssign)
+ inlvar := inlvars[n]
+ if inlvar == nil {
+ Fatalf("missing inlvar for %v", n)
+ }
+ as.Ninit.Append(nod(ODCL, inlvar, nil))
+ inlvar.Name.Defn = as
+ return inlvar
}
var inlgen int
@@ -889,7 +911,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
if inlMap[fn] {
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname())
}
return n
@@ -903,12 +925,12 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
// We have a function node, and it has an inlineable body.
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body))
- } else if Debug['m'] != 0 {
+ } else if Debug.m != 0 {
fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
}
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
}
@@ -970,14 +992,15 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
continue
}
if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap
- continue
- }
- inlvars[ln] = typecheck(inlvar(ln), ctxExpr)
- if ln.Class() == PPARAM || ln.Name.Param.Stackcopy != nil && ln.Name.Param.Stackcopy.Class() == PPARAM {
- ninit.Append(nod(ODCL, inlvars[ln], nil))
+ // TODO(mdempsky): Remove once I'm confident
+ // this never actually happens. We currently
+ // perform inlining before escape analysis, so
+ // nothing should have moved to the heap yet.
+ Fatalf("impossible: %v", ln)
}
+ inlf := typecheck(inlvar(ln), ctxExpr)
+ inlvars[ln] = inlf
if genDwarfInline > 0 {
- inlf := inlvars[ln]
if ln.Class() == PPARAM {
inlf.Name.SetInlFormal(true)
} else {
@@ -1019,56 +1042,42 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// Assign arguments to the parameters' temp names.
as := nod(OAS2, nil, nil)
- as.Rlist.Set(n.List.Slice())
+ as.SetColas(true)
+ if n.Op == OCALLMETH {
+ if n.Left.Left == nil {
+ Fatalf("method call without receiver: %+v", n)
+ }
+ as.Rlist.Append(n.Left.Left)
+ }
+ as.Rlist.Append(n.List.Slice()...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
var vas *Node
- if fn.IsMethod() {
- rcv := fn.Type.Recv()
-
- if n.Left.Op == ODOTMETH {
- // For x.M(...), assign x directly to the
- // receiver parameter.
- if n.Left.Left == nil {
- Fatalf("method call without receiver: %+v", n)
- }
- ras := nod(OAS, tinlvar(rcv, inlvars), n.Left.Left)
- ras = typecheck(ras, ctxStmt)
- ninit.Append(ras)
- } else {
- // For T.M(...), add the receiver parameter to
- // as.List, so it's assigned by the normal
- // arguments.
- if as.Rlist.Len() == 0 {
- Fatalf("non-method call to method without first arg: %+v", n)
- }
- as.List.Append(tinlvar(rcv, inlvars))
- }
+ if recv := fn.Type.Recv(); recv != nil {
+ as.List.Append(inlParam(recv, as, inlvars))
}
-
for _, param := range fn.Type.Params().Fields().Slice() {
// For ordinary parameters or variadic parameters in
// dotted calls, just add the variable to the
// assignment list, and we're done.
if !param.IsDDD() || n.IsDDD() {
- as.List.Append(tinlvar(param, inlvars))
+ as.List.Append(inlParam(param, as, inlvars))
continue
}
// Otherwise, we need to collect the remaining values
// to pass as a slice.
- numvals := n.List.Len()
-
x := as.List.Len()
- for as.List.Len() < numvals {
+ for as.List.Len() < as.Rlist.Len() {
as.List.Append(argvar(param.Type, as.List.Len()))
}
varargs := as.List.Slice()[x:]
- vas = nod(OAS, tinlvar(param, inlvars), nil)
+ vas = nod(OAS, nil, nil)
+ vas.Left = inlParam(param, vas, inlvars)
if len(varargs) == 0 {
vas.Right = nodnil()
vas.Right.Type = param.Type
@@ -1165,7 +1174,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
}
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
}
@@ -1176,7 +1185,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// PAUTO's in the calling functions, and link them off of the
// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
func inlvar(var_ *Node) *Node {
- if Debug['m'] > 3 {
+ if Debug.m > 3 {
fmt.Printf("inlvar %+v\n", var_)
}
@@ -1255,13 +1264,13 @@ func (subst *inlsubst) node(n *Node) *Node {
switch n.Op {
case ONAME:
if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
}
return inlvar
}
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("not substituting name %+v\n", n)
}
return n
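
A sketch of a call pattern that becomes inlinable through inlCallee and
staticValue: the function-typed local is initialized once and never
reassigned, so the callee is statically known (names are illustrative):

	package p

	func add(a, b int) int { return a + b }

	func sum() int {
		f := add       // single assignment; address of f never taken
		return f(1, 2) // staticValue resolves f to add, which can be inlined
	}
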
diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/gc/inl_test.go
index 9d3b8c59fd..afa6b98315 100644
--- a/src/cmd/compile/internal/gc/inl_test.go
+++ b/src/cmd/compile/internal/gc/inl_test.go
@@ -83,7 +83,7 @@ func TestIntendedInlining(t *testing.T) {
"puintptr.ptr",
"spanOf",
"spanOfUnchecked",
- //"(*gcWork).putFast", // TODO(austin): For debugging #27993
+ "(*gcWork).putFast",
"(*gcWork).tryGetFast",
"(*guintptr).set",
"(*markBits).advance",
@@ -115,6 +115,7 @@ func TestIntendedInlining(t *testing.T) {
"byLiteral.Len",
"byLiteral.Less",
"byLiteral.Swap",
+ "(*dictDecoder).tryWriteCopy",
},
"encoding/base64": {
"assemble32",
diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go
index 1a344c6566..7cce371408 100644
--- a/src/cmd/compile/internal/gc/lex.go
+++ b/src/cmd/compile/internal/gc/lex.go
@@ -48,8 +48,11 @@ const (
Nowritebarrierrec // error on write barrier in this or recursive callees
Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
- // Runtime-only type pragmas
+ // Runtime and cgo type pragmas
NotInHeap // values of this type must not be heap allocated
+
+ // Go command pragmas
+ GoBuildPragma
)
const (
@@ -71,6 +74,8 @@ const (
func pragmaFlag(verb string) PragmaFlag {
switch verb {
+ case "go:build":
+ return GoBuildPragma
case "go:nointerface":
if objabi.Fieldtrack_enabled != 0 {
return Nointerface
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 7ad3bfe0c8..0b65e8a0b4 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -34,8 +34,6 @@ import (
"strings"
)
-var imported_unsafe bool
-
var (
buildid string
spectre string
@@ -132,7 +130,7 @@ func hidePanic() {
// supportsDynlink reports whether or not the code generator for the given
// architecture supports the -shared and -dynlink flags.
func supportsDynlink(arch *sys.Arch) bool {
- return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.S390X)
+ return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X)
}
// timing data for compiler phases
@@ -211,18 +209,27 @@ func Main(archInit func(*Arch)) {
flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
flag.BoolVar(&compiling_std, "std", false, "compiling standard library")
- objabi.Flagcount("%", "debug non-static initializers", &Debug['%'])
- objabi.Flagcount("B", "disable bounds checking", &Debug['B'])
- objabi.Flagcount("C", "disable printing of columns in error messages", &Debug['C']) // TODO(gri) remove eventually
flag.StringVar(&localimport, "D", "", "set relative `path` for local imports")
- objabi.Flagcount("E", "debug symbol export", &Debug['E'])
+
+ objabi.Flagcount("%", "debug non-static initializers", &Debug.P)
+ objabi.Flagcount("B", "disable bounds checking", &Debug.B)
+ objabi.Flagcount("C", "disable printing of columns in error messages", &Debug.C)
+ objabi.Flagcount("E", "debug symbol export", &Debug.E)
+ objabi.Flagcount("K", "debug missing line numbers", &Debug.K)
+ objabi.Flagcount("L", "show full file names in error messages", &Debug.L)
+ objabi.Flagcount("N", "disable optimizations", &Debug.N)
+ objabi.Flagcount("S", "print assembly listing", &Debug.S)
+ objabi.Flagcount("W", "debug parse tree after type checking", &Debug.W)
+ objabi.Flagcount("e", "no limit on number of errors reported", &Debug.e)
+ objabi.Flagcount("h", "halt on error", &Debug.h)
+ objabi.Flagcount("j", "debug runtime-initialized variables", &Debug.j)
+ objabi.Flagcount("l", "disable inlining", &Debug.l)
+ objabi.Flagcount("m", "print optimization decisions", &Debug.m)
+ objabi.Flagcount("r", "debug generated wrappers", &Debug.r)
+ objabi.Flagcount("w", "debug type checking", &Debug.w)
+
objabi.Flagfn1("I", "add `directory` to import search path", addidir)
- objabi.Flagcount("K", "debug missing line numbers", &Debug['K'])
- objabi.Flagcount("L", "show full file names in error messages", &Debug['L'])
- objabi.Flagcount("N", "disable optimizations", &Debug['N'])
- objabi.Flagcount("S", "print assembly listing", &Debug['S'])
objabi.AddVersionFlag() // -V
- objabi.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency")
@@ -231,17 +238,13 @@ func Main(archInit func(*Arch)) {
flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
- objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
- objabi.Flagcount("h", "halt on error", &Debug['h'])
+ objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg)
objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
- objabi.Flagcount("j", "debug runtime-initialized variables", &Debug['j'])
- objabi.Flagcount("l", "disable inlining", &Debug['l'])
flag.StringVar(&flag_lang, "lang", "", "release to compile for")
flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`")
objabi.Flagcount("live", "debug liveness analysis", &debuglive)
- objabi.Flagcount("m", "print optimization decisions", &Debug['m'])
if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
}
@@ -249,7 +252,6 @@ func Main(archInit func(*Arch)) {
flag.StringVar(&outfile, "o", "", "write output to `file`")
flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
flag.BoolVar(&writearchive, "pack", false, "write to file.a instead of file.o")
- objabi.Flagcount("r", "debug generated wrappers", &Debug['r'])
if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
flag.BoolVar(&flag_race, "race", false, "enable race detector")
}
@@ -259,7 +261,6 @@ func Main(archInit func(*Arch)) {
}
flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity")
- objabi.Flagcount("w", "debug type checking", &Debug['w'])
flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
var flag_shared bool
var flag_dynlink bool
@@ -325,9 +326,9 @@ func Main(archInit func(*Arch)) {
Ctxt.Flag_shared = flag_dynlink || flag_shared
Ctxt.Flag_dynlink = flag_dynlink
- Ctxt.Flag_optimize = Debug['N'] == 0
+ Ctxt.Flag_optimize = Debug.N == 0
- Ctxt.Debugasm = Debug['S']
+ Ctxt.Debugasm = Debug.S
Ctxt.Debugvlog = Debug_vlog
if flagDWARF {
Ctxt.DebugInfo = debuginfo
@@ -399,7 +400,7 @@ func Main(archInit func(*Arch)) {
instrumenting = true
}
- if compiling_runtime && Debug['N'] != 0 {
+ if compiling_runtime && Debug.N != 0 {
log.Fatal("cannot disable optimizations while compiling runtime")
}
if nBackendWorkers < 1 {
@@ -504,11 +505,11 @@ func Main(archInit func(*Arch)) {
}
// enable inlining. for now:
- // default: inlining on. (debug['l'] == 1)
- // -l: inlining off (debug['l'] == 0)
- // -l=2, -l=3: inlining on again, with extra debugging (debug['l'] > 1)
- if Debug['l'] <= 1 {
- Debug['l'] = 1 - Debug['l']
+ // default: inlining on. (Debug.l == 1)
+ // -l: inlining off (Debug.l == 0)
+ // -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1)
+ if Debug.l <= 1 {
+ Debug.l = 1 - Debug.l
}
if jsonLogOpt != "" { // parse version,destination from json logging optimization.
@@ -516,6 +517,7 @@ func Main(archInit func(*Arch)) {
}
ssaDump = os.Getenv("GOSSAFUNC")
+ ssaDir = os.Getenv("GOSSADIR")
if ssaDump != "" {
if strings.HasSuffix(ssaDump, "+") {
ssaDump = ssaDump[:len(ssaDump)-1]
@@ -594,7 +596,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top1")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias) {
+ if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) {
xtop[i] = typecheck(n, ctxStmt)
}
}
@@ -606,7 +608,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top2")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias {
+ if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() {
xtop[i] = typecheck(n, ctxStmt)
}
}
@@ -665,7 +667,7 @@ func Main(archInit func(*Arch)) {
// Phase 5: Inlining
timings.Start("fe", "inlining")
if Debug_typecheckinl != 0 {
- // Typecheck imported function bodies if debug['l'] > 1,
+ // Typecheck imported function bodies if Debug.l > 1,
// otherwise lazily when used or re-exported.
for _, n := range importlist {
if n.Func.Inl != nil {
@@ -679,7 +681,7 @@ func Main(archInit func(*Arch)) {
}
}
- if Debug['l'] != 0 {
+ if Debug.l != 0 {
// Find functions that can be inlined and clone them before walk expands them.
visitBottomUp(xtop, func(list []*Node, recursive bool) {
numfns := numNonClosures(list)
@@ -690,7 +692,7 @@ func Main(archInit func(*Arch)) {
// across more than one function.
caninl(n)
} else {
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname)
}
}
@@ -967,9 +969,10 @@ func readSymABIs(file, myimportpath string) {
if len(parts) != 3 {
log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
}
- sym, abi := parts[1], parts[2]
- if abi != "ABI0" { // Only supported external ABI right now
- log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abi)
+ sym, abistr := parts[1], parts[2]
+ abi, valid := obj.ParseABI(abistr)
+ if !valid {
+ log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
}
// If the symbol is already prefixed with
@@ -982,9 +985,9 @@ func readSymABIs(file, myimportpath string) {
// Record for later.
if parts[0] == "def" {
- symabiDefs[sym] = obj.ABI0
+ symabiDefs[sym] = abi
} else {
- symabiRefs[sym] = obj.ABI0
+ symabiRefs[sym] = abi
}
default:
log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
@@ -1173,7 +1176,6 @@ func importfile(f *Val) *types.Pkg {
}
if path_ == "unsafe" {
- imported_unsafe = true
return unsafepkg
}
@@ -1406,29 +1408,34 @@ func IsAlias(sym *types.Sym) bool {
return sym.Def != nil && asNode(sym.Def).Sym != sym
}
-// By default, assume any debug flags are incompatible with concurrent compilation.
-// A few are safe and potentially in common use for normal compiles, though; mark them as such here.
-var concurrentFlagOK = [256]bool{
- 'B': true, // disabled bounds checking
- 'C': true, // disable printing of columns in error messages
- 'e': true, // no limit on errors; errors all come from non-concurrent code
- 'I': true, // add `directory` to import search path
- 'N': true, // disable optimizations
- 'l': true, // disable inlining
- 'w': true, // all printing happens before compilation
- 'W': true, // all printing happens before compilation
- 'S': true, // printing disassembly happens at the end (but see concurrentBackendAllowed below)
+// By default, assume any debug flags are incompatible with concurrent
+// compilation. A few are safe and potentially in common use for
+// normal compiles, though; concurrentFlagOk reports whether only
+// those safe flags are in use.
+func concurrentFlagOk() bool {
+ // Zero out the allowed debug flags, then check that the rest of
+ // the struct is the zero value: if so, no flag that would prevent
+ // concurrent compilation is set.
+ d := Debug
+ d.B = 0 // disable bounds checking
+ d.C = 0 // disable printing of columns in error messages
+ d.e = 0 // no limit on errors; errors all come from non-concurrent code
+ d.N = 0 // disable optimizations
+ d.l = 0 // disable inlining
+ d.w = 0 // all printing happens before compilation
+ d.W = 0 // all printing happens before compilation
+ d.S = 0 // printing disassembly happens at the end (but see concurrentBackendAllowed below)
+
+ return d == DebugFlags{}
}
func concurrentBackendAllowed() bool {
- for i, x := range &Debug {
- if x != 0 && !concurrentFlagOK[i] {
- return false
- }
+ if !concurrentFlagOk() {
+ return false
}
- // Debug['S'] by itself is ok, because all printing occurs
+
+ // Debug.S by itself is ok, because all printing occurs
// while writing the object file, and that is non-concurrent.
- // Adding Debug_vlog, however, causes Debug['S'] to also print
+ // Adding Debug_vlog, however, causes Debug.S to also print
// while flushing the plist, which happens concurrently.
if Debug_vlog || debugstr != "" || debuglive > 0 {
return false
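
The concurrentFlagOk rewrite uses a small Go idiom: copy the struct, zero
the fields to ignore, and compare the rest against the zero value. A
standalone sketch with a made-up flags struct:

	package main

	import "fmt"

	type flags struct{ B, N, S int }

	// onlySafeSet reports whether no flag other than the "safe" B is set.
	func onlySafeSet(f flags) bool {
		f.B = 0 // B is allowed; ignore it
		return f == flags{}
	}

	func main() {
		fmt.Println(onlySafeSet(flags{B: 2}))       // true
		fmt.Println(onlySafeSet(flags{B: 2, N: 1})) // false
	}
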
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
index 5dce533e4b..67d24ef0bc 100644
--- a/src/cmd/compile/internal/gc/noder.go
+++ b/src/cmd/compile/internal/gc/noder.go
@@ -11,6 +11,7 @@ import (
"runtime"
"strconv"
"strings"
+ "unicode"
"unicode/utf8"
"cmd/compile/internal/syntax"
@@ -90,7 +91,11 @@ func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
} else {
// line directive base
p0 := b0.Pos()
- p1 := src.MakePos(p.makeSrcPosBase(p0.Base()), p0.Line(), p0.Col())
+ p0b := p0.Base()
+ if p0b == b0 {
+ panic("infinite recursion in makeSrcPosBase")
+ }
+ p1 := src.MakePos(p.makeSrcPosBase(p0b), p0.Line(), p0.Col())
b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col())
}
p.basemap[b0] = b1
@@ -130,11 +135,13 @@ type noder struct {
base *src.PosBase
}
- file *syntax.File
- linknames []linkname
- pragcgobuf [][]string
- err chan syntax.Error
- scope ScopeID
+ file *syntax.File
+ linknames []linkname
+ pragcgobuf [][]string
+ err chan syntax.Error
+ scope ScopeID
+ importedUnsafe bool
+ importedEmbed bool
// scopeVars is a stack tracking the number of variables declared in the
// current function at the moment each open scope was opened.
@@ -236,19 +243,21 @@ type linkname struct {
func (p *noder) node() {
types.Block = 1
- imported_unsafe = false
+ p.importedUnsafe = false
+ p.importedEmbed = false
p.setlineno(p.file.PkgName)
mkpackage(p.file.PkgName.Value)
if pragma, ok := p.file.Pragma.(*Pragma); ok {
+ pragma.Flag &^= GoBuildPragma
p.checkUnused(pragma)
}
xtop = append(xtop, p.decls(p.file.DeclList)...)
for _, n := range p.linknames {
- if !imported_unsafe {
+ if !p.importedUnsafe {
p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
@@ -323,7 +332,6 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
val := p.basicLit(imp.Path)
ipkg := importfile(&val)
-
if ipkg == nil {
if nerrors == 0 {
Fatalf("phase error in import")
@@ -331,6 +339,13 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
return
}
+ if ipkg == unsafepkg {
+ p.importedUnsafe = true
+ }
+ if ipkg.Path == "embed" {
+ p.importedEmbed = true
+ }
+
ipkg.Direct = true
var my *types.Sym
@@ -372,6 +387,20 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
}
if pragma, ok := decl.Pragma.(*Pragma); ok {
+ if len(pragma.Embeds) > 0 {
+ if !p.importedEmbed {
+ // This check can't be done when building the list pragma.Embeds
+ // because that list is created before the noder starts walking over the file,
+ // so at that point it hasn't seen the imports.
+ // We're left to check now, just before applying the //go:embed lines.
+ for _, e := range pragma.Embeds {
+ p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
+ }
+ } else {
+ exprs = varEmbed(p, names, typ, exprs, pragma.Embeds)
+ }
+ pragma.Embeds = nil
+ }
p.checkUnused(pragma)
}
@@ -454,17 +483,17 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
param := n.Name.Param
param.Ntype = typ
- param.Alias = decl.Alias
+ param.SetAlias(decl.Alias)
if pragma, ok := decl.Pragma.(*Pragma); ok {
if !decl.Alias {
- param.Pragma = pragma.Flag & TypePragmas
+ param.SetPragma(pragma.Flag & TypePragmas)
pragma.Flag &^= TypePragmas
}
p.checkUnused(pragma)
}
nod := p.nod(decl, ODCLTYPE, n, nil)
- if param.Alias && !langSupported(1, 9, localpkg) {
+ if param.Alias() && !langSupported(1, 9, localpkg) {
yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9")
}
return nod
@@ -773,7 +802,7 @@ func (p *noder) sum(x syntax.Expr) *Node {
n := p.expr(x)
if Isconst(n, CTSTR) && n.Sym == nil {
nstr = n
- chunks = append(chunks, strlit(nstr))
+ chunks = append(chunks, nstr.StringVal())
}
for i := len(adds) - 1; i >= 0; i-- {
@@ -783,12 +812,12 @@ func (p *noder) sum(x syntax.Expr) *Node {
if Isconst(r, CTSTR) && r.Sym == nil {
if nstr != nil {
// Collapse r into nstr instead of adding to n.
- chunks = append(chunks, strlit(r))
+ chunks = append(chunks, r.StringVal())
continue
}
nstr = r
- chunks = append(chunks, strlit(nstr))
+ chunks = append(chunks, nstr.StringVal())
} else {
if len(chunks) > 1 {
nstr.SetVal(Val{U: strings.Join(chunks, "")})
@@ -1437,11 +1466,6 @@ func (p *noder) mkname(name *syntax.Name) *Node {
return mkname(p.name(name))
}
-func (p *noder) newname(name *syntax.Name) *Node {
- // TODO(mdempsky): Set line number?
- return newname(p.name(name))
-}
-
func (p *noder) wrapname(n syntax.Node, x *Node) *Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
@@ -1497,13 +1521,15 @@ var allowedStdPragmas = map[string]bool{
"go:cgo_import_dynamic": true,
"go:cgo_ldflag": true,
"go:cgo_dynamic_linker": true,
+ "go:embed": true,
"go:generate": true,
}
// *Pragma is the value stored in a syntax.Pragma during parsing.
type Pragma struct {
- Flag PragmaFlag // collected bits
- Pos []PragmaPos // position of each individual flag
+ Flag PragmaFlag // collected bits
+ Pos []PragmaPos // position of each individual flag
+ Embeds []PragmaEmbed
}
type PragmaPos struct {
@@ -1511,12 +1537,22 @@ type PragmaPos struct {
Pos syntax.Pos
}
+type PragmaEmbed struct {
+ Pos syntax.Pos
+ Patterns []string
+}
+
func (p *noder) checkUnused(pragma *Pragma) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
p.yyerrorpos(pos.Pos, "misplaced compiler directive")
}
}
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.yyerrorpos(e.Pos, "misplaced go:embed directive")
+ }
+ }
}
func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
@@ -1525,6 +1561,11 @@ func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
}
}
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
+ }
+ }
}
// pragma is called concurrently if files are parsed concurrently.
@@ -1569,6 +1610,17 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
}
p.linknames = append(p.linknames, linkname{pos, f[1], target})
+ case text == "go:embed", strings.HasPrefix(text, "go:embed "):
+ args, err := parseGoEmbed(text[len("go:embed"):])
+ if err != nil {
+ p.error(syntax.Error{Pos: pos, Msg: err.Error()})
+ }
+ if len(args) == 0 {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
+ break
+ }
+ pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args})
+
case strings.HasPrefix(text, "go:cgo_import_dynamic "):
// This is permitted for general use because Solaris
// code relies on it in golang.org/x/sys/unix and others.
@@ -1641,3 +1693,64 @@ func mkname(sym *types.Sym) *Node {
}
return n
}
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// go/build/read.go also processes these strings and contains similar logic.
+func parseGoEmbed(args string) ([]string, error) {
+ var list []string
+ for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
+ var path string
+ Switch:
+ switch args[0] {
+ default:
+ i := len(args)
+ for j, c := range args {
+ if unicode.IsSpace(c) {
+ i = j
+ break
+ }
+ }
+ path = args[:i]
+ args = args[i:]
+
+ case '`':
+ i := strings.Index(args[1:], "`")
+ if i < 0 {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ path = args[1 : 1+i]
+ args = args[1+i+1:]
+
+ case '"':
+ i := 1
+ for ; i < len(args); i++ {
+ if args[i] == '\\' {
+ i++
+ continue
+ }
+ if args[i] == '"' {
+ q, err := strconv.Unquote(args[:i+1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+ }
+ path = q
+ args = args[i+1:]
+ break Switch
+ }
+ }
+ if i >= len(args) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+
+ if args != "" {
+ r, _ := utf8.DecodeRuneInString(args)
+ if !unicode.IsSpace(r) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+ list = append(list, path)
+ }
+ return list, nil
+}
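For illustration, a standalone sketch of the tokenization above (split is a hypothetical stand-in for parseGoEmbed, simplified: unterminated quotes and escaped double quotes are not handled):

package main

import (
	"fmt"
	"strconv"
	"strings"
	"unicode"
)

// split mirrors parseGoEmbed's three token forms for well-formed input:
// bare patterns, double-quoted Go strings, and back-quoted Go strings.
func split(args string) []string {
	var list []string
	for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
		switch args[0] {
		case '`':
			i := strings.Index(args[1:], "`")
			list = append(list, args[1:1+i])
			args = args[1+i+1:]
		case '"':
			i := strings.Index(args[1:], `"`) // simplified: no \" escapes
			q, _ := strconv.Unquote(args[:i+2])
			list = append(list, q)
			args = args[i+2:]
		default:
			i := strings.IndexFunc(args, unicode.IsSpace)
			if i < 0 {
				i = len(args)
			}
			list = append(list, args[:i])
			args = args[i:]
		}
	}
	return list
}

func main() {
	fmt.Printf("%q\n", split("static/*.html \"white space.txt\" `raw.txt`"))
	// Output: ["static/*.html" "white space.txt" "raw.txt"]
}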
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index b55331a948..226eb45252 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -14,6 +14,8 @@ import (
"encoding/json"
"fmt"
"io"
+ "io/ioutil"
+ "os"
"sort"
"strconv"
)
@@ -125,6 +127,7 @@ func dumpdata() {
itabsLen := len(itabs)
dumpimportstrings()
dumpbasictypes()
+ dumpembeds()
// Calls to dumpsignats can generate functions,
// like method wrappers and hash and equality routines.
@@ -272,7 +275,7 @@ func dumpGlobalConst(n *Node) {
default:
return
}
- Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64())
+ Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
}
func dumpglobls() {
@@ -305,20 +308,21 @@ func dumpglobls() {
// global symbols can't be declared during parallel compilation.
func addGCLocals() {
for _, s := range Ctxt.Text {
- if s.Func == nil {
+ fn := s.Func()
+ if fn == nil {
continue
}
- for _, gcsym := range []*obj.LSym{s.Func.GCArgs, s.Func.GCLocals, s.Func.GCRegs} {
+ for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals, fn.GCRegs} {
if gcsym != nil && !gcsym.OnList() {
ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
}
}
- if x := s.Func.StackObjects; x != nil {
+ if x := fn.StackObjects; x != nil {
attr := int16(obj.RODATA)
ggloblsym(x, int32(len(x.P)), attr)
x.Set(obj.AttrStatic, true)
}
- if x := s.Func.OpenCodedDeferInfo; x != nil {
+ if x := fn.OpenCodedDeferInfo; x != nil {
ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
}
}
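addGCLocals is one of many call sites in this patch that move from the direct s.Func field to an s.Func() accessor returning nil for non-function symbols. A hedged sketch of the accessor shape, not the real cmd/internal/obj definitions:

package obj // sketch only

// Function metadata moves behind an optional field, so data symbols
// pay nothing for it and Func() can return nil, which callers such as
// addGCLocals now check.
type FuncInfo struct {
	GCArgs, GCLocals, GCRegs *LSym
}

type LSym struct {
	P     []byte
	extra interface{} // *FuncInfo for text symbols, nil otherwise
}

func (s *LSym) Func() *FuncInfo {
	fn, _ := s.extra.(*FuncInfo)
	return fn
}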
@@ -357,28 +361,31 @@ func dbvec(s *obj.LSym, off int, bv bvec) int {
return off
}
+const (
+ stringSymPrefix = "go.string."
+ stringSymPattern = ".gostring.%d.%x"
+)
+
+// stringsym returns a symbol containing the string s.
+// The symbol contains the string data, not a string header.
func stringsym(pos src.XPos, s string) (data *obj.LSym) {
var symname string
if len(s) > 100 {
// Huge strings are hashed to avoid long names in object files.
// Indulge in some paranoia by writing the length of s, too,
// as protection against length extension attacks.
+ // fileStringSym below relies on this same naming pattern.
h := sha256.New()
io.WriteString(h, s)
- symname = fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
+ symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
} else {
// Small strings get named directly by their contents.
symname = strconv.Quote(s)
}
- const prefix = "go.string."
- symdataname := prefix + symname
-
- symdata := Ctxt.Lookup(symdataname)
-
+ symdata := Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
- // string data
- off := dsname(symdata, 0, s, pos, "string")
+ off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
@@ -386,26 +393,122 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) {
return symdata
}
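A runnable sketch of the two naming branches now shared through stringSymPrefix and stringSymPattern (symname is a hypothetical helper; the constants and the use of SHA-256 are copied from the code above):

package main

import (
	"crypto/sha256"
	"fmt"
	"strconv"
	"strings"
)

func symname(s string) string {
	if len(s) > 100 {
		// huge strings: named by length plus content hash
		h := sha256.Sum256([]byte(s))
		return "go.string." + fmt.Sprintf(".gostring.%d.%x", len(s), h)
	}
	return "go.string." + strconv.Quote(s) // small strings: named by contents
}

func main() {
	fmt.Println(symname("hi"))                          // go.string."hi"
	fmt.Println(symname(strings.Repeat("x", 200))[:30]) // go.string..gostring.200.<hash>
}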
-var slicebytes_gen int
+// fileStringSym returns a symbol for the contents and the size of file.
+// If readonly is true, the symbol shares storage with any literal string
+// or other file with the same content and is placed in a read-only section.
+// If readonly is false, the symbol is a read-write copy separate from any other,
+// for use as the backing store of a []byte.
+// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
+// The returned symbol contains the data itself, not a string header.
+func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer f.Close()
+ info, err := f.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ if !info.Mode().IsRegular() {
+ return nil, 0, fmt.Errorf("not a regular file")
+ }
+ size := info.Size()
+ if size <= 1*1024 {
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if int64(len(data)) != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ var sym *obj.LSym
+ if readonly {
+ sym = stringsym(pos, string(data))
+ } else {
+ sym = slicedata(pos, string(data)).Sym.Linksym()
+ }
+ if len(hash) > 0 {
+ sum := sha256.Sum256(data)
+ copy(hash, sum[:])
+ }
+ return sym, size, nil
+ }
+ if size > 2e9 {
+ // ggloblsym takes an int32,
+ // and probably the rest of the toolchain
+ // can't handle such big symbols either.
+ // See golang.org/issue/9862.
+ return nil, 0, fmt.Errorf("file too large")
+ }
-func slicebytes(nam *Node, s string) {
- slicebytes_gen++
- symname := fmt.Sprintf(".gobytes.%d", slicebytes_gen)
+ // File is too big to read and keep in memory.
+ // Compute hash if needed for read-only content hashing or if the caller wants it.
+ var sum []byte
+ if readonly || len(hash) > 0 {
+ h := sha256.New()
+ n, err := io.Copy(h, f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ sum = h.Sum(nil)
+ copy(hash, sum)
+ }
+
+ var symdata *obj.LSym
+ if readonly {
+ symname := fmt.Sprintf(stringSymPattern, size, sum)
+ symdata = Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ // Note: AttrContentAddressable cannot be set here,
+ // because the content-addressable-handling code
+ // does not know about file symbols.
+ }
+ } else {
+ // Emit a zero-length data symbol
+ // and then fix up length and content to use file.
+ symdata = slicedata(pos, "").Sym.Linksym()
+ symdata.Size = size
+ symdata.Type = objabi.SNOPTRDATA
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ }
+
+ return symdata, size, nil
+}
+
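fileStringSym therefore has three regimes: files up to 1 KB are read whole and funneled through stringsym or slicedata, files above 2e9 bytes are rejected because ggloblsym takes an int32, and everything in between is streamed so only the SHA-256 state is held in memory. A standalone sketch of the streaming path, with the symbol plumbing replaced by returning the digest (error strings copied from the patch):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

func contentHash(file string) (sum []byte, size int64, err error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, 0, err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return nil, 0, err
	}
	if !info.Mode().IsRegular() {
		return nil, 0, fmt.Errorf("not a regular file")
	}
	size = info.Size()
	if size > 2e9 { // see golang.org/issue/9862
		return nil, 0, fmt.Errorf("file too large")
	}
	h := sha256.New()
	n, err := io.Copy(h, f) // stream; never hold the whole file
	if err != nil {
		return nil, 0, err
	}
	if n != size {
		return nil, 0, fmt.Errorf("file changed between reads")
	}
	return h.Sum(nil), size, nil
}

func main() {
	sum, size, err := contentHash(os.Args[0])
	fmt.Printf("%d bytes, sha256=%x, err=%v\n", size, sum, err)
}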
+var slicedataGen int
+
+func slicedata(pos src.XPos, s string) *Node {
+ slicedataGen++
+ symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := localpkg.Lookup(symname)
symnode := newname(sym)
sym.Def = asTypesNode(symnode)
lsym := sym.Linksym()
- off := dsname(lsym, 0, s, nam.Pos, "slice")
+ off := dstringdata(lsym, 0, s, pos, "slice")
ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+ return symnode
+}
+
+func slicebytes(nam *Node, s string) {
if nam.Op != ONAME {
Fatalf("slicebytes %v", nam)
}
- slicesym(nam, symnode, int64(len(s)))
+ slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
}
-func dsname(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
+func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// Objects that are too large will cause the data section to overflow right away,
// causing a cryptic error message by the linker. Check for oversize objects here
// and provide a useful error message instead.
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 75da154fe2..863de5b6c7 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -50,7 +50,7 @@ type Order struct {
// Order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
func order(fn *Node) {
- if Debug['W'] > 1 {
+ if Debug.W > 1 {
s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
dumplist(s, fn.Nbody)
}
@@ -323,12 +323,7 @@ func (o *Order) stmtList(l Nodes) {
// and rewrites it to:
// m = OMAKESLICECOPY([]T, x, s); nil
func orderMakeSliceCopy(s []*Node) {
- const go115makeslicecopy = true
- if !go115makeslicecopy {
- return
- }
-
- if Debug['N'] != 0 || instrumenting {
+ if Debug.N != 0 || instrumenting {
return
}
@@ -1102,7 +1097,7 @@ func (o *Order) expr(n, lhs *Node) *Node {
haslit := false
for _, n1 := range n.List.Slice() {
hasbyte = hasbyte || n1.Op == OBYTES2STR
- haslit = haslit || n1.Op == OLITERAL && len(strlit(n1)) != 0
+ haslit = haslit || n1.Op == OLITERAL && len(n1.StringVal()) != 0
}
if haslit && hasbyte {
@@ -1274,7 +1269,7 @@ func (o *Order) expr(n, lhs *Node) *Node {
var t *types.Type
switch n.Op {
case OSLICELIT:
- t = types.NewArray(n.Type.Elem(), n.Right.Int64())
+ t = types.NewArray(n.Type.Elem(), n.Right.Int64Val())
case OCALLPART:
t = partialCallType(n)
}
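The Debug['W'] to Debug.W rewrite here recurs throughout the rest of the patch (Debug.N, Debug.B, Debug.K, Debug.m, and so on): the byte-indexed debug array becomes a struct, so flag names are checked by the compiler instead of being arbitrary subscripts. A sketch of the assumed shape, listing only fields this diff exercises; the per-flag descriptions are inferred:

// Assumed shape (sketch); only fields exercised in this patch are listed.
var Debug struct {
	B int // -B: disable bounds checking
	C int // -C: disable printing of columns in error messages
	e int // -e: no limit on number of errors reported
	h int // -h: halt on error
	K int // -K: warn on unknown (line 0) positions
	L int // -L: show full file names in error messages
	m int // -m: print optimization decisions
	N int // -N: disable optimizations
	P int // replaces Debug['%']: debug non-static initializers
	W int // -W: debug parse tree after type checking
}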
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 52b1ed351d..353f4b08c9 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -266,8 +266,8 @@ func compile(fn *Node) {
dtypesym(n.Type)
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
- if fn.Func.lsym.Func.StackObjects == nil {
- fn.Func.lsym.Func.StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
+ if fn.Func.lsym.Func().StackObjects == nil {
+ fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
}
}
}
@@ -415,7 +415,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
case PAUTO:
if !n.Name.Used() {
// Text == nil -> generating abstract function
- if fnsym.Func.Text != nil {
+ if fnsym.Func().Text != nil {
Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
}
continue
@@ -425,7 +425,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
continue
}
apdecls = append(apdecls, n)
- fnsym.Func.RecordAutoType(ngotype(n).Linksym())
+ fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
@@ -435,7 +435,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
// the function symbol to ensure that the type is included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
- for t, _ := range fnsym.Func.Autot {
+ for t, _ := range fnsym.Func().Autot {
typesyms = append(typesyms, t)
}
sort.Sort(obj.BySymName(typesyms))
@@ -444,7 +444,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
r.Sym = sym
r.Type = objabi.R_USETYPE
}
- fnsym.Func.Autot = nil
+ fnsym.Func().Autot = nil
var varScopes []ScopeID
for _, decl := range decls {
@@ -522,7 +522,7 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
}
typename := dwarf.InfoPrefix + typesymname(n.Type)
- delete(fnsym.Func.Autot, ngotype(n).Linksym())
+ delete(fnsym.Func().Autot, ngotype(n).Linksym())
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
@@ -667,7 +667,7 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
ChildIndex: -1,
})
// Record the go type to ensure that it gets emitted by the linker.
- fnsym.Func.RecordAutoType(ngotype(n).Linksym())
+ fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
return decls, vars
@@ -731,7 +731,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
}
gotype := ngotype(n).Linksym()
- delete(fnsym.Func.Autot, gotype)
+ delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if genDwarfInline > 1 {
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index a9ea37701e..b471accb65 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -1552,26 +1552,27 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
// Emit the live pointer map data structures
ls := e.curfn.Func.lsym
- ls.Func.GCArgs, ls.Func.GCLocals, ls.Func.GCRegs = lv.emit()
+ fninfo := ls.Func()
+ fninfo.GCArgs, fninfo.GCLocals, fninfo.GCRegs = lv.emit()
p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ls.Func.GCArgs
+ p.To.Sym = fninfo.GCArgs
p = pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ls.Func.GCLocals
+ p.To.Sym = fninfo.GCLocals
if !go115ReduceLiveness {
p = pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_RegPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ls.Func.GCRegs
+ p.To.Sym = fninfo.GCRegs
}
return lv.livenessMap
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index 5434b0167a..1b4d765d42 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -112,12 +112,13 @@ func typecheckrangeExpr(n *Node) {
v2 = nil
}
- var why string
if v1 != nil {
if v1.Name != nil && v1.Name.Defn == n {
v1.Type = t1
- } else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
- yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
+ } else if v1.Type != nil {
+ if op, why := assignop(t1, v1.Type); op == OXXX {
+ yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
+ }
}
checkassign(n, v1)
}
@@ -125,8 +126,10 @@ func typecheckrangeExpr(n *Node) {
if v2 != nil {
if v2.Name != nil && v2.Name.Defn == n {
v2.Type = t2
- } else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
- yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
+ } else if v2.Type != nil {
+ if op, why := assignop(t2, v2.Type); op == OXXX {
+ yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
+ }
}
checkassign(n, v2)
}
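These two range.go hunks are the caller side of the assignop signature change made in subr.go further down: the *string out-parameter becomes a second result, so the reason string exists only on the OXXX path. A minimal runnable sketch of the migration pattern, with check as a hypothetical stand-in:

package main

import "fmt"

// check stands in for assignop: the failure reason is now a second
// result instead of a *string out-parameter.
func check(x int) (ok bool, why string) {
	if x < 0 {
		return false, ": negative values not assignable"
	}
	return true, ""
}

func main() {
	if ok, why := check(-1); !ok {
		fmt.Printf("cannot assign%s\n", why)
	}
}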
@@ -463,7 +466,7 @@ func walkrange(n *Node) *Node {
//
// where == for keys of map m is reflexive.
func isMapClear(n *Node) bool {
- if Debug['N'] != 0 || instrumenting {
+ if Debug.N != 0 || instrumenting {
return false
}
@@ -530,7 +533,7 @@ func mapClear(m *Node) *Node {
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func arrayClear(n, v1, v2, a *Node) bool {
- if Debug['N'] != 0 || instrumenting {
+ if Debug.N != 0 || instrumenting {
return false
}
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index 21429af782..229fcfeaee 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -1275,8 +1275,9 @@ func dtypesym(t *types.Type) *obj.LSym {
}
ot = dgopkgpath(lsym, ot, tpkg)
+ xcount := sort.Search(n, func(i int) bool { return !types.IsExported(m[i].name.Name) })
ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
- ot = duintptr(lsym, ot, uint64(n))
+ ot = duintptr(lsym, ot, uint64(xcount))
ot = duintptr(lsym, ot, uint64(n))
dataAdd := imethodSize() * n
ot = dextratype(lsym, ot, t, dataAdd)
diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go
index d7239d5693..e66b859e10 100644
--- a/src/cmd/compile/internal/gc/scope.go
+++ b/src/cmd/compile/internal/gc/scope.go
@@ -62,9 +62,9 @@ func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
if len(marks) == 0 {
return
}
- p0 := fnsym.Func.Text
+ p0 := fnsym.Func().Text
scope := findScope(marks, p0.Pos)
- for p := fnsym.Func.Text; p != nil; p = p.Link {
+ for p := p0; p != nil; p = p.Link {
if p.Pos == p0.Pos {
continue
}
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index af19a96bbc..212fcc022d 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -39,7 +39,7 @@ func (s *InitSchedule) append(n *Node) {
// staticInit adds an initialization statement n to the schedule.
func (s *InitSchedule) staticInit(n *Node) {
if !s.tryStaticInit(n) {
- if Debug['%'] != 0 {
+ if Debug.P != 0 {
Dump("nonstatic", n)
}
s.append(n)
@@ -128,7 +128,7 @@ func (s *InitSchedule) staticcopy(l *Node, r *Node) bool {
case OSLICELIT:
// copy slice
a := s.inittemps[r]
- slicesym(l, a, r.Right.Int64())
+ slicesym(l, a, r.Right.Int64Val())
return true
case OARRAYLIT, OSTRUCTLIT:
@@ -205,7 +205,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
case OSTR2BYTES:
if l.Class() == PEXTERN && r.Left.Op == OLITERAL {
- sval := strlit(r.Left)
+ sval := r.Left.StringVal()
slicebytes(l, sval)
return true
}
@@ -213,7 +213,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
case OSLICELIT:
s.initplan(r)
// Init slice.
- bound := r.Right.Int64()
+ bound := r.Right.Int64Val()
ta := types.NewArray(r.Type.Elem(), bound)
ta.SetNoalg(true)
a := staticname(ta)
@@ -375,11 +375,6 @@ func readonlystaticname(t *types.Type) *Node {
return n
}
-func isLiteral(n *Node) bool {
- // Treat nils as zeros rather than literals.
- return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
-}
-
func (n *Node) isSimpleName() bool {
return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
}
@@ -404,7 +399,7 @@ const (
func getdyn(n *Node, top bool) initGenType {
switch n.Op {
default:
- if isLiteral(n) {
+ if n.isGoConst() {
return initConst
}
return initDynamic
@@ -413,7 +408,7 @@ func getdyn(n *Node, top bool) initGenType {
if !top {
return initDynamic
}
- if n.Right.Int64()/4 > int64(n.List.Len()) {
+ if n.Right.Int64Val()/4 > int64(n.List.Len()) {
// <25% of entries have explicit values.
// Very rough estimate: it takes 4 bytes of instructions
// to initialize 1 byte of result. So don't use a static
@@ -559,7 +554,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
continue
}
- islit := isLiteral(value)
+ islit := value.isGoConst()
if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
continue
}
@@ -589,12 +584,12 @@ func isSmallSliceLit(n *Node) bool {
r := n.Right
- return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64() <= smallArrayBytes/n.Type.Elem().Width)
+ return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width)
}
func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
// make an array type corresponding to the number of elements we have
- t := types.NewArray(n.Type.Elem(), n.Right.Int64())
+ t := types.NewArray(n.Type.Elem(), n.Right.Int64Val())
dowidth(t)
if ctxt == inNonInitFunction {
@@ -732,7 +727,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
continue
}
- if vstat != nil && isLiteral(value) { // already set by copy from static value
+ if vstat != nil && value.isGoConst() { // already set by copy from static value
continue
}
@@ -993,7 +988,7 @@ func oaslit(n *Node, init *Nodes) bool {
func getlit(lit *Node) int {
if smallintconst(lit) {
- return int(lit.Int64())
+ return int(lit.Int64Val())
}
return -1
}
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 815ff7f99f..fb9d3e811a 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -9,8 +9,8 @@ import (
"fmt"
"html"
"os"
+ "path/filepath"
"sort"
- "strings"
"bufio"
"bytes"
@@ -27,6 +27,7 @@ var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache
var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
+var ssaDir string // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string // generate CFGs for these phases
const ssaDumpFile = "ssa.html"
@@ -49,21 +50,16 @@ func initssaconfig() {
// Caching is disabled in the backend, so generating these here avoids allocations.
_ = types.NewPtr(types.Types[TINTER]) // *interface{}
_ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
- _ = types.NewPtr(types.NewPtr(types.Idealstring)) // **string
_ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
_ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
_ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
_ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
- _ = types.NewPtr(types.NewSlice(types.Idealstring)) // *[]string
_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
_ = types.NewPtr(types.Types[TINT16]) // *int16
_ = types.NewPtr(types.Types[TINT64]) // *int64
_ = types.NewPtr(types.Errortype) // *error
types.NewPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug['N'] == 0)
- if thearch.LinkArch.Name == "386" {
- ssaConfig.Set387(thearch.Use387)
- }
+ ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
ssaConfig.Race = flag_race
ssaCaches = make([]ssa.Cache, nBackendWorkers)
@@ -174,10 +170,6 @@ func initssaconfig() {
ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU")
}
- // GO386=387 runtime definitions
- ControlWord64trunc = sysvar("controlWord64trunc") // uint16
- ControlWord32 = sysvar("controlWord32") // uint16
-
// Wasm (all asm funcs with special ABIs)
WasmMove = sysvar("wasmMove")
WasmZero = sysvar("wasmZero")
@@ -248,7 +240,7 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer")
- s.curfn.Func.lsym.Func.OpenCodedDeferInfo = x
+ s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
@@ -347,7 +339,13 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.f.Entry.Pos = fn.Pos
if printssa {
- s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDumpFile, s.f, ssaDumpCFG)
+ ssaDF := ssaDumpFile
+ if ssaDir != "" {
+ ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html")
+ ssaD := filepath.Dir(ssaDF)
+ os.MkdirAll(ssaD, 0755)
+ }
+ s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
// TODO: generate and print a mapping from nodes to values and blocks
dumpSourcesColumn(s.f.HTMLWriter, fn)
s.f.HTMLWriter.WriteAST("AST", astBuf)
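When ssaDir is set, each dumped function gets its own file rather than overwriting one shared ssa.html, and since import paths contain slashes the MkdirAll call is what creates the intermediate directories. A runnable illustration of the path construction with assumed values:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Assumed values: a dump directory plus an import path that
	// itself contains a slash.
	ssaDir, myimportpath, name := "/tmp/ssadump", "example.com/pkg", "Foo"
	ssaDF := filepath.Join(ssaDir, myimportpath+"."+name+".html")
	fmt.Println(ssaDF)               // /tmp/ssadump/example.com/pkg.Foo.html
	fmt.Println(filepath.Dir(ssaDF)) // the MkdirAll target: /tmp/ssadump/example.com
}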
@@ -359,7 +357,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.fwdVars = map[*Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
- s.hasOpenDefers = Debug['N'] == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
+ s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
switch {
case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
@@ -743,7 +741,7 @@ func (s *state) pushLine(line src.XPos) {
// the frontend may emit nodes with missing line numbers;
// use the parent line number in this case.
line = s.peekPos()
- if Debug['K'] != 0 {
+ if Debug.K != 0 {
Warn("buildssa: unknown position (line 0)")
}
} else {
@@ -1216,7 +1214,7 @@ func (s *state) stmt(n *Node) {
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
- if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 {
+ if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
@@ -1273,7 +1271,7 @@ func (s *state) stmt(n *Node) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
i, j, k := rhs.SliceBounds()
- if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
+ if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) {
// [0:...] is the same as [:...]
i = nil
}
@@ -1303,7 +1301,7 @@ func (s *state) stmt(n *Node) {
case OIF:
if Isconst(n.Left, CTBOOL) {
s.stmtList(n.Left.Ninit)
- if n.Left.Bool() {
+ if n.Left.BoolVal() {
s.stmtList(n.Nbody)
} else {
s.stmtList(n.Rlist)
@@ -2474,6 +2472,11 @@ func (s *state) expr(n *Node) *ssa.Value {
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ case OANDNOT:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
+ return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
case OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
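The new OANDNOT case above lowers Go's bit-clear operator through the identity x &^ y == x & ^y, emitting a complement followed by the generic AND instead of a dedicated SSA op. A one-line check of the identity:

package main

import "fmt"

func main() {
	x, y := uint8(0b1011_0110), uint8(0b0110_0011)
	lowered := x & ^y // AND(x, BITNOT(y)), as the new case emits
	fmt.Printf("%08b %v\n", lowered, lowered == x&^y) // 10010100 true
}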
@@ -2557,22 +2560,22 @@ func (s *state) expr(n *Node) *ssa.Value {
return s.addr(n.Left)
case ORESULT:
- if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall {
+ if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
// Do the old thing
addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.load(n.Type, addr)
+ return s.rawLoad(n.Type, addr)
}
which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
if which == -1 {
// Do the old thing // TODO: Panic instead.
addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.load(n.Type, addr)
+ return s.rawLoad(n.Type, addr)
}
if canSSAType(n.Type) {
return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
} else {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
- return s.load(n.Type, addr)
+ return s.rawLoad(n.Type, addr)
}
case ODEREF:
@@ -2612,7 +2615,7 @@ func (s *state) expr(n *Node) *ssa.Value {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
- return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(strlit(n.Left)[n.Right.Int64()])))
+ return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()])))
}
a := s.expr(n.Left)
i := s.expr(n.Right)
@@ -2621,7 +2624,7 @@ func (s *state) expr(n *Node) *ssa.Value {
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if Isconst(n.Right, CTINT) {
- ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
@@ -3391,6 +3394,13 @@ func init() {
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ },
+ sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
@@ -3429,6 +3439,12 @@ func init() {
return nil
},
sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -3530,12 +3546,24 @@ func init() {
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "And",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "Or",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
@@ -3544,9 +3572,19 @@ func init() {
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
+ alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
@@ -4251,6 +4289,7 @@ func (s *state) openDeferExit() {
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
zeroval := s.constInt8(types.Types[TUINT8], 0)
+ testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
@@ -4288,23 +4327,38 @@ func (s *state) openDeferExit() {
stksize := fn.Type.ArgWidth()
var ACArgs []ssa.Param
var ACResults []ssa.Param
+ var callArgs []*ssa.Value
if r.rcvr != nil {
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
- s.store(types.Types[TUINTPTR], addr, v)
+ if testLateExpansion {
+ callArgs = append(callArgs, v)
+ } else {
+ s.store(types.Types[TUINTPTR], addr, v)
+ }
}
for j, argAddrVal := range r.argVals {
f := getParam(r.n, j)
pt := types.NewPtr(f.Type)
- addr := s.constOffPtrSP(pt, argStart+f.Offset)
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart + f.Offset)})
- if !canSSAType(f.Type) {
- s.move(f.Type, addr, argAddrVal)
+ ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)})
+ if testLateExpansion {
+ var a *ssa.Value
+ if !canSSAType(f.Type) {
+ a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
+ } else {
+ a = s.load(f.Type, argAddrVal)
+ }
+ callArgs = append(callArgs, a)
} else {
- argVal := s.load(f.Type, argAddrVal)
- s.storeType(f.Type, addr, argVal, 0, false)
+ addr := s.constOffPtrSP(pt, argStart+f.Offset)
+ if !canSSAType(f.Type) {
+ s.move(f.Type, addr, argAddrVal)
+ } else {
+ argVal := s.load(f.Type, argAddrVal)
+ s.storeType(f.Type, addr, argVal, 0, false)
+ }
}
}
var call *ssa.Value
@@ -4312,13 +4366,31 @@ func (s *state) openDeferExit() {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[TUINTPTR], v)
- call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, v, s.mem())
+ aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
+ }
} else {
- // Do a static call if the original call was a static function or method
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults), s.mem())
+ aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ // Do a static call if the original call was a static function or method
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
}
call.AuxInt = stksize
- s.vars[&memVar] = call
+ if testLateExpansion {
+ s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ } else {
+ s.vars[&memVar] = call
+ }
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
@@ -4375,11 +4447,9 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
switch n.Op {
case OCALLFUNC:
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
sym = fn.Sym
- if !returnResultAddr && strings.Contains(sym.Name, "testLateExpansion") {
- testLateExpansion = true
- }
break
}
closure = s.expr(fn)
@@ -4392,11 +4462,9 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
if fn.Op != ODOTMETH {
s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal {
sym = fn.Sym
- if !returnResultAddr && strings.Contains(sym.Name, "testLateExpansion") {
- testLateExpansion = true
- }
break
}
closure = s.getMethodClosure(fn)
@@ -4406,6 +4474,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
if fn.Op != ODOTINTER {
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
}
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
@@ -4424,6 +4493,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
var call *ssa.Value
if k == callDeferStack {
+ testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
// Make a defer struct d on the stack.
t := deferstruct(stksize)
d := tempAt(n.Pos, s.curfn, t)
@@ -4474,10 +4544,17 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
}
// Call runtime.deferprocStack with pointer to _defer record.
- arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
- s.store(types.Types[TUINTPTR], arg0, addr)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())})
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults), s.mem())
+ aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, addr, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
+ s.store(types.Types[TUINTPTR], arg0, addr)
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
if stksize < int64(Widthptr) {
// We need room for both the call to deferprocStack and the call to
// the deferred function.
@@ -4544,9 +4621,21 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// call target
switch {
case k == callDefer:
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferproc, ACArgs, ACResults), s.mem())
+ aux := ssa.StaticAuxCall(deferproc, ACArgs, ACResults)
+ if testLateExpansion {
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
case k == callGo:
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(newproc, ACArgs, ACResults), s.mem())
+ aux := ssa.StaticAuxCall(newproc, ACArgs, ACResults)
+ if testLateExpansion {
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
case closure != nil:
// rawLoad because loading the code pointer from a
// closure is always safe, but IsSanitizerSafeAddr
@@ -4554,18 +4643,25 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// critical that we not clobber any arguments already
// stored onto the stack.
codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
- call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem())
+ if testLateExpansion {
+ aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem())
+ }
case codeptr != nil:
- call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
+ if testLateExpansion {
+ aux := ssa.InterfaceAuxCall(ACArgs, ACResults)
+ call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
+ }
case sym != nil:
if testLateExpansion {
- var tys []*types.Type
aux := ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults)
- for i := int64(0); i < aux.NResults(); i++ {
- tys = append(tys, aux.TypeOfResult(i))
- }
- tys = append(tys, types.TypeMem)
- call = s.newValue0A(ssa.OpStaticLECall, types.NewResults(tys), aux)
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
} else {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
@@ -4606,7 +4702,11 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
}
fp := res.Field(0)
if returnResultAddr {
- return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
+ pt := types.NewPtr(fp.Type)
+ if testLateExpansion {
+ return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
+ }
+ return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize())
}
if testLateExpansion {
@@ -4649,7 +4749,7 @@ func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
s.nilCheck(itab)
itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
- rcvr := s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
+ rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
}
@@ -4710,7 +4810,7 @@ func (s *state) addr(n *Node) *ssa.Value {
}
case ORESULT:
// load return from callee
- if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall {
+ if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
return s.constOffPtrSP(t, n.Xoffset)
}
which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
@@ -4770,7 +4870,7 @@ func (s *state) addr(n *Node) *ssa.Value {
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n *Node) bool {
- if Debug['N'] != 0 {
+ if Debug.N != 0 {
return false
}
for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
@@ -4881,7 +4981,7 @@ func (s *state) nilCheck(ptr *ssa.Value) {
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
- if bounded || Debug['B'] != 0 {
+ if bounded || Debug.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check is necessary;
// just return the extended index.
//
@@ -5013,15 +5113,22 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
s.prevCall = nil
// Write args to the stack
off := Ctxt.FixedFrameSize()
+ testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
var ACArgs []ssa.Param
var ACResults []ssa.Param
+ var callArgs []*ssa.Value
+
for _, arg := range args {
t := arg.Type
off = Rnd(off, t.Alignment())
- ptr := s.constOffPtrSP(t.PtrTo(), off)
size := t.Size()
ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
- s.store(t, ptr, arg)
+ if testLateExpansion {
+ callArgs = append(callArgs, arg)
+ } else {
+ ptr := s.constOffPtrSP(t.PtrTo(), off)
+ s.store(t, ptr, arg)
+ }
off += size
}
off = Rnd(off, int64(Widthreg))
@@ -5035,8 +5142,17 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
}
// Issue call
- call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn, ACArgs, ACResults), s.mem())
- s.vars[&memVar] = call
+ var call *ssa.Value
+ aux := ssa.StaticAuxCall(fn, ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ s.vars[&memVar] = call
+ }
if !returns {
// Finish block
@@ -5052,11 +5168,24 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
// Load results
res := make([]*ssa.Value, len(results))
- for i, t := range results {
- off = Rnd(off, t.Alignment())
- ptr := s.constOffPtrSP(types.NewPtr(t), off)
- res[i] = s.load(t, ptr)
- off += t.Size()
+ if testLateExpansion {
+ for i, t := range results {
+ off = Rnd(off, t.Alignment())
+ if canSSAType(t) {
+ res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
+ } else {
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
+ res[i] = s.rawLoad(t, addr)
+ }
+ off += t.Size()
+ }
+ } else {
+ for i, t := range results {
+ off = Rnd(off, t.Alignment())
+ ptr := s.constOffPtrSP(types.NewPtr(t), off)
+ res[i] = s.load(t, ptr)
+ off += t.Size()
+ }
}
off = Rnd(off, int64(Widthptr))
@@ -5093,7 +5222,10 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.store(t, left, right)
case t.IsPtrShaped():
- // no scalar fields.
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ s.store(t, left, right) // see issue 42032
+ }
+ // otherwise, no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
@@ -5137,6 +5269,9 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ break // see issue 42032
+ }
s.store(t, left, right)
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
@@ -5913,9 +6048,7 @@ type SSAGenState struct {
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
- // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
- SSEto387 map[int16]int16
- // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
+ // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
ScratchFpMem *Node
maxarg int64 // largest frame size for arguments to calls made by the function
@@ -6021,7 +6154,7 @@ func emitStackObjects(e *ssafn, pp *Progs) {
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
- x := e.curfn.Func.lsym.Func.StackObjects
+ x := e.curfn.Func.lsym.Func().StackObjects
off := 0
off = duintptr(x, off, uint64(len(vars)))
for _, v := range vars {
@@ -6058,7 +6191,7 @@ func genssa(f *ssa.Func, pp *Progs) {
s.livenessMap = liveness(e, f, pp)
emitStackObjects(e, pp)
- openDeferInfo := e.curfn.Func.lsym.Func.OpenCodedDeferInfo
+ openDeferInfo := e.curfn.Func.lsym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
@@ -6082,10 +6215,6 @@ func genssa(f *ssa.Func, pp *Progs) {
progToBlock[s.pp.next] = f.Blocks[0]
}
- if thearch.Use387 {
- s.SSEto387 = map[int16]int16{}
- }
-
s.ScratchFpMem = e.scratchFpMem
if Ctxt.Flag_locationlists {
@@ -6208,7 +6337,7 @@ func genssa(f *ssa.Func, pp *Progs) {
}
// Emit control flow instructions for block
var next *ssa.Block
- if i < len(f.Blocks)-1 && Debug['N'] == 0 {
+ if i < len(f.Blocks)-1 && Debug.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
@@ -6267,7 +6396,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.Func.lsym.Func.AddInlMark(p, inlMarks[m])
+ pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
@@ -6279,7 +6408,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.Func.lsym.Func.AddInlMark(p, inlMarks[p])
+ pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[p])
}
}
}
@@ -6516,7 +6645,7 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
} else {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
}
- if bounded || Debug['B'] != 0 {
+ if bounded || Debug.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
@@ -6790,56 +6919,38 @@ func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
ptrType := types.NewPtr(types.Types[TUINT8])
lenType := types.Types[TINT]
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this string up into two separate variables.
- p := e.splitSlot(&name, ".ptr", 0, ptrType)
- l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
- return p, l
- }
- // Return the two parts of the larger variable.
- return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
+ // Split this string up into two separate variables.
+ p := e.SplitSlot(&name, ".ptr", 0, ptrType)
+ l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
+ return p, l
}
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
u := types.Types[TUINTPTR]
t := types.NewPtr(types.Types[TUINT8])
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this interface up into two separate variables.
- f := ".itab"
- if n.Type.IsEmptyInterface() {
- f = ".type"
- }
- c := e.splitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
- d := e.splitSlot(&name, ".data", u.Size(), t)
- return c, d
+ // Split this interface up into two separate variables.
+ f := ".itab"
+ if n.Type.IsEmptyInterface() {
+ f = ".type"
}
- // Return the two parts of the larger variable.
- return ssa.LocalSlot{N: n, Type: u, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
+ c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
+ d := e.SplitSlot(&name, ".data", u.Size(), t)
+ return c, d
}
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
ptrType := types.NewPtr(name.Type.Elem())
lenType := types.Types[TINT]
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this slice up into three separate variables.
- p := e.splitSlot(&name, ".ptr", 0, ptrType)
- l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
- c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
- return p, l, c
- }
- // Return the three parts of the larger variable.
- return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
- ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
- ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
+ p := e.SplitSlot(&name, ".ptr", 0, ptrType)
+ l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
+ c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
+ return p, l, c
}
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
@@ -6847,53 +6958,30 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
} else {
t = types.Types[TFLOAT32]
}
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this complex up into two separate variables.
- r := e.splitSlot(&name, ".real", 0, t)
- i := e.splitSlot(&name, ".imag", t.Size(), t)
- return r, i
- }
- // Return the two parts of the larger variable.
- return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
+ r := e.SplitSlot(&name, ".real", 0, t)
+ i := e.SplitSlot(&name, ".imag", t.Size(), t)
+ return r, i
}
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
var t *types.Type
if name.Type.IsSigned() {
t = types.Types[TINT32]
} else {
t = types.Types[TUINT32]
}
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this int64 up into two separate variables.
- if thearch.LinkArch.ByteOrder == binary.BigEndian {
- return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
- }
- return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
- }
- // Return the two parts of the larger variable.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
+ return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
}
- return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
+ return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
}
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
- n := name.N.(*Node)
st := name.Type
- ft := st.FieldType(i)
- var offset int64
- for f := 0; f < i; f++ {
- offset += st.FieldType(f).Size()
- }
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Note: the _ field may appear several times. But
- // have no fear, identically-named but distinct Autos are
- // ok, albeit maybe confusing for a debugger.
- return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
- }
- return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
+ // Note: the _ field may appear several times. But
+ // have no fear, identically-named but distinct Autos are
+ // ok, albeit maybe confusing for a debugger.
+ return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i))
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
@@ -6903,19 +6991,23 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
e.Fatalf(n.Pos, "bad array size")
}
et := at.Elem()
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- return e.splitSlot(&name, "[0]", 0, et)
- }
- return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
+ return e.SplitSlot(&name, "[0]", 0, et)
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
return itabsym(it, offset)
}
-// splitSlot returns a slot representing the data of parent starting at offset.
-func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
- s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}
+// SplitSlot returns a slot representing the data of parent starting at offset.
+func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
+ node := parent.N.(*Node)
+
+ if node.Class() != PAUTO || node.Name.Addrtaken() {
+ // addressed things and non-autos retain their parents (i.e., cannot truly be split)
+ return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
+ }
+
+ s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
n := &Node{
Name: new(Name),
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 07547df36e..defefd76b3 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -96,7 +96,7 @@ func flusherrors() {
}
func hcrash() {
- if Debug['h'] != 0 {
+ if Debug.h != 0 {
flusherrors()
if outfile != "" {
os.Remove(outfile)
@@ -107,7 +107,7 @@ func hcrash() {
}
func linestr(pos src.XPos) string {
- return Ctxt.OutermostPos(pos).Format(Debug['C'] == 0, Debug['L'] == 1)
+ return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1)
}
// lasterror keeps track of the most recently issued error.
@@ -153,7 +153,7 @@ func yyerrorl(pos src.XPos, format string, args ...interface{}) {
hcrash()
nerrors++
- if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
+ if nsavederrors+nerrors >= 10 && Debug.e == 0 {
flusherrors()
fmt.Printf("%v: too many errors\n", linestr(pos))
errorexit()
@@ -175,7 +175,7 @@ func Warn(fmt_ string, args ...interface{}) {
func Warnl(line src.XPos, fmt_ string, args ...interface{}) {
adderr(line, fmt_, args...)
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
flusherrors()
}
}
@@ -222,7 +222,7 @@ func hasUniquePos(n *Node) bool {
}
if !n.Pos.IsKnown() {
- if Debug['K'] != 0 {
+ if Debug.K != 0 {
Warn("setlineno: unknown position (line 0)")
}
return false
@@ -348,7 +348,7 @@ func newname(s *types.Sym) *Node {
return n
}
-// newname returns a new ONAME Node associated with symbol s at position pos.
+// newnamel returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting n.Name.Curfn.
func newnamel(pos src.XPos, s *types.Sym) *Node {
if s == nil {
@@ -546,22 +546,19 @@ func methtype(t *types.Type) *types.Type {
// Is type src assignment compatible with type dst?
// If so, return op code to use in conversion.
-// If not, return OXXX.
-func assignop(src, dst *types.Type, why *string) Op {
- if why != nil {
- *why = ""
- }
-
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+func assignop(src, dst *types.Type) (Op, string) {
if src == dst {
- return OCONVNOP
+ return OCONVNOP, ""
}
if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
- return OXXX
+ return OXXX, ""
}
// 1. src type is identical to dst.
if types.Identical(src, dst) {
- return OCONVNOP
+ return OCONVNOP, ""
}
// 2. src and dst have identical underlying types
@@ -575,13 +572,13 @@ func assignop(src, dst *types.Type, why *string) Op {
if src.IsEmptyInterface() {
// Conversion between two empty interfaces
// requires no code.
- return OCONVNOP
+ return OCONVNOP, ""
}
if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() {
// Conversion between two types, at least one unnamed,
// needs no conversion. The exception is nonempty interfaces
// which need to have their itab updated.
- return OCONVNOP
+ return OCONVNOP, ""
}
}
@@ -590,49 +587,47 @@ func assignop(src, dst *types.Type, why *string) Op {
var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
- return OCONVIFACE
+ return OCONVIFACE, ""
}
// we'll have complained about this method anyway, suppress spurious messages.
if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
- return OCONVIFACE
- }
-
- if why != nil {
- if isptrto(src, TINTER) {
- *why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
- } else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
- } else if have != nil && have.Sym == missing.Sym {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else if ptr != 0 {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
- } else if have != nil {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
- }
+ return OCONVIFACE, ""
+ }
+
+ var why string
+ if isptrto(src, TINTER) {
+ why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
+ } else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
+ } else if have != nil && have.Sym == missing.Sym {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
+ } else if have != nil {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
}
- return OXXX
+ return OXXX, why
}
if isptrto(dst, TINTER) {
- if why != nil {
- *why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
- }
- return OXXX
+ why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+ return OXXX, why
}
if src.IsInterface() && dst.Etype != TBLANK {
var missing, have *types.Field
var ptr int
- if why != nil && implements(dst, src, &missing, &have, &ptr) {
- *why = ": need type assertion"
+ var why string
+ if implements(dst, src, &missing, &have, &ptr) {
+ why = ": need type assertion"
}
- return OXXX
+ return OXXX, why
}
// 4. src is a bidirectional channel value, dst is a channel type,
@@ -640,7 +635,7 @@ func assignop(src, dst *types.Type, why *string) Op {
// either src or dst is not a named type.
if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
- return OCONVNOP
+ return OCONVNOP, ""
}
}
@@ -653,7 +648,7 @@ func assignop(src, dst *types.Type, why *string) Op {
TCHAN,
TINTER,
TSLICE:
- return OCONVNOP
+ return OCONVNOP, ""
}
}
@@ -661,26 +656,23 @@ func assignop(src, dst *types.Type, why *string) Op {
// 7. Any typed value can be assigned to the blank identifier.
if dst.Etype == TBLANK {
- return OCONVNOP
+ return OCONVNOP, ""
}
- return OXXX
+ return OXXX, ""
}
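assignop's why *string out-parameter becomes a second return value, the idiomatic Go shape for optional error detail; callers that don't care discard it with the blank identifier. A small sketch of the migration pattern (names are illustrative):

    package main

    import "fmt"

    // Before: optional out-parameter, nil-checked at every write site.
    func opOld(why *string) int {
        if why != nil {
            *why = ": reason"
        }
        return 0
    }

    // After: a plain second return value; callers discard it with _.
    func opNew() (int, string) {
        return 0, ": reason"
    }

    func main() {
        op, _ := opNew()  // caller that only wants the op
        _, why := opNew() // caller that only wants the reason
        fmt.Println(op, why)
    }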
// Can we convert a value of type src to a value of type dst?
// If so, return op code to use in conversion (maybe OCONVNOP).
-// If not, return OXXX.
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
// srcConstant indicates whether the value of type src is a constant.
-func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
- if why != nil {
- *why = ""
- }
-
+func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
if src == dst {
- return OCONVNOP
+ return OCONVNOP, ""
}
if src == nil || dst == nil {
- return OXXX
+ return OXXX, ""
}
// Conversions from regular to go:notinheap are not allowed
@@ -688,23 +680,19 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// rules.
// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
- if why != nil {
- *why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
- }
- return OXXX
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
+ return OXXX, why
}
// (b) Disallow string to []T where T is go:notinheap.
if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
- if why != nil {
- *why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
- }
- return OXXX
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
+ return OXXX, why
}
// 1. src can be assigned to dst.
- op := assignop(src, dst, why)
+ op, why := assignop(src, dst)
if op != OXXX {
- return op
+ return op, why
}
// The rules for interfaces are no different in conversions
@@ -712,60 +700,57 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// with the good message from assignop.
// Otherwise clear the error.
if src.IsInterface() || dst.IsInterface() {
- return OXXX
- }
- if why != nil {
- *why = ""
+ return OXXX, why
}
// 2. Ignoring struct tags, src and dst have identical underlying types.
if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
- return OCONVNOP
+ return OCONVNOP, ""
}
// 3. src and dst are unnamed pointer types and, ignoring struct tags,
// their base types have identical underlying types.
if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
- return OCONVNOP
+ return OCONVNOP, ""
}
}
// 4. src and dst are both integer or floating point types.
if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
if simtype[src.Etype] == simtype[dst.Etype] {
- return OCONVNOP
+ return OCONVNOP, ""
}
- return OCONV
+ return OCONV, ""
}
// 5. src and dst are both complex types.
if src.IsComplex() && dst.IsComplex() {
if simtype[src.Etype] == simtype[dst.Etype] {
- return OCONVNOP
+ return OCONVNOP, ""
}
- return OCONV
+ return OCONV, ""
}
// Special case for constant conversions: any numeric
// conversion is potentially okay. We'll validate further
// within evconst. See #38117.
if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
- return OCONV
+ return OCONV, ""
}
// 6. src is an integer or has type []byte or []rune
// and dst is a string type.
if src.IsInteger() && dst.IsString() {
- return ORUNESTR
+ return ORUNESTR, ""
}
if src.IsSlice() && dst.IsString() {
if src.Elem().Etype == types.Bytetype.Etype {
- return OBYTES2STR
+ return OBYTES2STR, ""
}
if src.Elem().Etype == types.Runetype.Etype {
- return ORUNES2STR
+ return ORUNES2STR, ""
}
}
@@ -773,21 +758,21 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// String to slice.
if src.IsString() && dst.IsSlice() {
if dst.Elem().Etype == types.Bytetype.Etype {
- return OSTR2BYTES
+ return OSTR2BYTES, ""
}
if dst.Elem().Etype == types.Runetype.Etype {
- return OSTR2RUNES
+ return OSTR2RUNES, ""
}
}
// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
- return OCONVNOP
+ return OCONVNOP, ""
}
// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
- return OCONVNOP
+ return OCONVNOP, ""
}
// src is map and dst is a pointer to corresponding hmap.
@@ -795,10 +780,10 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// go gc maps are implemented as a pointer to a hmap struct.
if src.Etype == TMAP && dst.IsPtr() &&
src.MapType().Hmap == dst.Elem() {
- return OCONVNOP
+ return OCONVNOP, ""
}
- return OXXX
+ return OXXX, ""
}
func assignconv(n *Node, t *types.Type, context string) *Node {
@@ -825,7 +810,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
// Convert ideal bool from comparison to plain bool
// if the next step is non-bool (like interface{}).
- if n.Type == types.Idealbool && !t.IsBoolean() {
+ if n.Type == types.UntypedBool && !t.IsBoolean() {
if n.Op == ONAME || n.Op == OLITERAL {
r := nod(OCONVNOP, n, nil)
r.Type = types.Types[TBOOL]
@@ -839,8 +824,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
return n
}
- var why string
- op := assignop(n.Type, t, &why)
+ op, why := assignop(n.Type, t)
if op == OXXX {
yyerror("cannot use %L as type %v in %s%s", n, t, context(), why)
op = OCONV
@@ -1040,25 +1024,24 @@ func calcHasCall(n *Node) bool {
return false
}
-func badtype(op Op, tl *types.Type, tr *types.Type) {
- fmt_ := ""
+func badtype(op Op, tl, tr *types.Type) {
+ var s string
if tl != nil {
- fmt_ += fmt.Sprintf("\n\t%v", tl)
+ s += fmt.Sprintf("\n\t%v", tl)
}
if tr != nil {
- fmt_ += fmt.Sprintf("\n\t%v", tr)
+ s += fmt.Sprintf("\n\t%v", tr)
}
// common mistake: *struct and *interface.
if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
- fmt_ += "\n\t(*struct vs *interface)"
+ s += "\n\t(*struct vs *interface)"
} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
- fmt_ += "\n\t(*interface vs *struct)"
+ s += "\n\t(*interface vs *struct)"
}
}
- s := fmt_
yyerror("illegal types for operand: %v%s", op, s)
}
@@ -1523,7 +1506,7 @@ func structargs(tl *types.Type, mustname bool) []*Node {
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
- if false && Debug['r'] != 0 {
+ if false && Debug.r != 0 {
fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
}
@@ -1596,7 +1579,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
fn.Nbody.Append(call)
}
- if false && Debug['r'] != 0 {
+ if false && Debug.r != 0 {
dumplist("genwrapper body", fn.Nbody)
}
@@ -1737,7 +1720,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
// the method does not exist for value types.
rcvr := tm.Type.Recv().Type
if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
- if false && Debug['r'] != 0 {
+ if false && Debug.r != 0 {
yyerror("interface pointer mismatch")
}
@@ -1871,8 +1854,10 @@ func isdirectiface(t *types.Type) bool {
}
switch t.Etype {
- case TPTR,
- TCHAN,
+ case TPTR:
+ // Pointers to notinheap types must be stored indirectly. See issue 42076.
+ return !t.Elem().NotInHeap()
+ case TCHAN,
TMAP,
TFUNC,
TUNSAFEPTR:
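isdirectiface now declines direct storage for pointers whose element type is go:notinheap (issue 42076): the garbage collector treats a direct interface data word as a heap pointer it may need to shade, which is unsound for not-in-heap memory. The ordinary direct case can be observed with a layout-dependent probe; this relies on the runtime's two-word eface representation and is illustrative only:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // eface mirrors the runtime's empty-interface layout (assumption).
    type eface struct {
        typ, data unsafe.Pointer
    }

    func main() {
        p := new(int)
        var i interface{} = p
        e := (*eface)(unsafe.Pointer(&i))
        // For a normal *int the pointer itself is the data word.
        fmt.Println(e.data == unsafe.Pointer(p)) // true
    }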
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
index 138b0acc53..8d9fbe300e 100644
--- a/src/cmd/compile/internal/gc/swt.go
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -189,16 +189,19 @@ func typecheckExprSwitch(n *Node) {
continue
}
- switch {
- case nilonly != "" && !n1.isNil():
+ if nilonly != "" && !n1.isNil() {
yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
- case t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type):
+ } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
- case assignop(n1.Type, t, nil) == 0 && assignop(t, n1.Type, nil) == 0:
- if n.Left != nil {
- yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
- } else {
- yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
+ } else {
+ op1, _ := assignop(n1.Type, t)
+ op2, _ := assignop(t, n1.Type)
+ if op1 == OXXX && op2 == OXXX {
+ if n.Left != nil {
+ yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
+ } else {
+ yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
+ }
}
}
@@ -358,8 +361,8 @@ func (s *exprSwitch) flush() {
// all we need here is consistency. We respect this
// sorting below.
sort.Slice(cc, func(i, j int) bool {
- si := strlit(cc[i].lo)
- sj := strlit(cc[j].lo)
+ si := cc[i].lo.StringVal()
+ sj := cc[j].lo.StringVal()
if len(si) != len(sj) {
return len(si) < len(sj)
}
@@ -368,7 +371,7 @@ func (s *exprSwitch) flush() {
// runLen returns the string length associated with a
// particular run of exprClauses.
- runLen := func(run []exprClause) int64 { return int64(len(strlit(run[0].lo))) }
+ runLen := func(run []exprClause) int64 { return int64(len(run[0].lo.StringVal())) }
// Collapse runs of consecutive strings with the same length.
var runs [][]exprClause
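Sorting the string clauses by length first, then content, is what lets the generated code dispatch on len(s) before comparing bytes, binary-searching within each equal-length run. The emitted logic has roughly this hand-written shape:

    package main

    import "fmt"

    func classify(s string) int {
        // First-level dispatch on length...
        switch len(s) {
        case 2:
            if s == "go" {
                return 1
            }
        case 4:
            // ...then comparisons within the sorted equal-length run.
            if s == "fast" {
                return 2
            }
            if s == "test" {
                return 3
            }
        }
        return 0
    }

    func main() {
        fmt.Println(classify("go"), classify("test"), classify("x")) // 1 3 0
    }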
@@ -405,7 +408,7 @@ func (s *exprSwitch) flush() {
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
- if last.jmp == c.jmp && last.hi.Int64()+1 == c.lo.Int64() {
+ if last.jmp == c.jmp && last.hi.Int64Val()+1 == c.lo.Int64Val() {
last.hi = c.lo
} else {
merged = append(merged, c)
@@ -440,7 +443,7 @@ func (c *exprClause) test(exprname *Node) *Node {
// Optimize "switch true { ...}" and "switch false { ... }".
if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() {
- if exprname.Val().U.(bool) {
+ if exprname.BoolVal() {
return c.lo
} else {
return nodl(c.pos, ONOT, c.lo, nil)
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
index 4aa2e230ce..58de9b5e3f 100644
--- a/src/cmd/compile/internal/gc/syntax.go
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -142,7 +142,7 @@ const (
_, _ // second nodeInitorder bit
_, nodeHasBreak
_, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
- _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP; or ANDNOT lowered to OAND
+ _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP
_, nodeIsDDD // is the argument variadic
_, nodeDiag // already printed error about this
_, nodeColas // OAS resulting from :=
@@ -247,7 +247,7 @@ func (n *Node) Val() Val {
// SetVal sets the Val for the node, which must not have been used with SetOpt.
func (n *Node) SetVal(v Val) {
if n.HasOpt() {
- Debug['h'] = 1
+ Debug.h = 1
Dump("have Opt", n)
Fatalf("have Opt")
}
@@ -270,7 +270,7 @@ func (n *Node) SetOpt(x interface{}) {
return
}
if n.HasVal() {
- Debug['h'] = 1
+ Debug.h = 1
Dump("have Val", n)
Fatalf("have Val")
}
@@ -460,14 +460,14 @@ type Param struct {
// x1 := xN.Defn
// x1.Innermost = xN.Outer
//
- // We leave xN.Innermost set so that we can still get to the original
+ // We leave x1.Innermost set so that we can still get to the original
// variable quickly. Not shown here, but once we're
// done parsing a function and no longer need xN.Outer for the
- // lexical x reference links as described above, closurebody
+ // lexical x reference links as described above, funcLit
// recomputes xN.Outer as the semantic x reference link tree,
// even filling in x in intermediate closures that might not
// have mentioned it along the way to inner closures that did.
- // See closurebody for details.
+ // See funcLit for details.
//
// During the eventual compilation, then, for closure variables we have:
//
@@ -480,11 +480,87 @@ type Param struct {
Innermost *Node
Outer *Node
- // OTYPE
- //
- // TODO: Should Func pragmas also be stored on the Name?
- Pragma PragmaFlag
- Alias bool // node is alias for Ntype (only used when type-checking ODCLTYPE)
+ // OTYPE & ONAME //go:embed info,
+ // sharing storage to reduce gc.Param size.
+ // Extra is nil, or else *Extra is a *paramType or an *embedFileList.
+ Extra *interface{}
+}
+
+type paramType struct {
+ flag PragmaFlag
+ alias bool
+}
+
+type embedFileList []string
+
+// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
+func (p *Param) Pragma() PragmaFlag {
+ if p.Extra == nil {
+ return 0
+ }
+ return (*p.Extra).(*paramType).flag
+}
+
+// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
+func (p *Param) SetPragma(flag PragmaFlag) {
+ if p.Extra == nil {
+ if flag == 0 {
+ return
+ }
+ p.Extra = new(interface{})
+ *p.Extra = &paramType{flag: flag}
+ return
+ }
+ (*p.Extra).(*paramType).flag = flag
+}
+
+// Alias reports whether p, which must be for an OTYPE, is a type alias.
+func (p *Param) Alias() bool {
+ if p.Extra == nil {
+ return false
+ }
+ t, ok := (*p.Extra).(*paramType)
+ if !ok {
+ return false
+ }
+ return t.alias
+}
+
+// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
+func (p *Param) SetAlias(alias bool) {
+ if p.Extra == nil {
+ if !alias {
+ return
+ }
+ p.Extra = new(interface{})
+ *p.Extra = &paramType{alias: alias}
+ return
+ }
+ (*p.Extra).(*paramType).alias = alias
+}
+
+// EmbedFiles returns the list of embedded files for p,
+// which must be for an ONAME var.
+func (p *Param) EmbedFiles() []string {
+ if p.Extra == nil {
+ return nil
+ }
+ return *(*p.Extra).(*embedFileList)
+}
+
+// SetEmbedFiles sets the list of embedded files for p,
+// which must be for an ONAME var.
+func (p *Param) SetEmbedFiles(list []string) {
+ if p.Extra == nil {
+ if len(list) == 0 {
+ return
+ }
+ f := embedFileList(list)
+ p.Extra = new(interface{})
+ *p.Extra = &f
+ return
+ }
+ *(*p.Extra).(*embedFileList) = list
}
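The Param change replaces two always-present OTYPE-only fields with a single Extra pointer that lazily holds either a *paramType or an *embedFileList, so the common Param carries one nil word instead of per-use fields. The storage-sharing pattern in isolation:

    package main

    import "fmt"

    // extra is nil, or points to whichever payload the node needs.
    type node struct {
        extra *interface{}
    }

    type typeInfo struct{ alias bool }

    func (n *node) setAlias(alias bool) {
        if n.extra == nil {
            if !alias {
                return // common case stays allocation-free
            }
            n.extra = new(interface{})
            *n.extra = &typeInfo{alias: alias}
            return
        }
        (*n.extra).(*typeInfo).alias = alias
    }

    func (n *node) isAlias() bool {
        if n.extra == nil {
            return false
        }
        t, ok := (*n.extra).(*typeInfo)
        return ok && t.alias
    }

    func main() {
        var n node
        fmt.Println(n.isAlias()) // false, no allocation yet
        n.setAlias(true)
        fmt.Println(n.isAlias()) // true
    }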
// Functions
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 0eb0dae373..8ebeaf1330 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -257,12 +257,12 @@ func typecheck(n *Node, top int) (res *Node) {
// are substituted.
cycle := cycleFor(n)
for _, n1 := range cycle {
- if n1.Name != nil && !n1.Name.Param.Alias {
+ if n1.Name != nil && !n1.Name.Param.Alias() {
// Cycle is ok. But if n is an alias type and doesn't
// have a type yet, we have a recursive type declaration
// with aliases that we can't handle properly yet.
// Report an error rather than crashing later.
- if n.Name != nil && n.Name.Param.Alias && n.Type == nil {
+ if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
lineno = n.Pos
Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
}
@@ -361,7 +361,7 @@ func typecheck1(n *Node, top int) (res *Node) {
ok |= ctxExpr
if n.Type == nil && n.Val().Ctype() == CTSTR {
- n.Type = types.Idealstring
+ n.Type = types.UntypedString
}
case ONONAME:
@@ -623,8 +623,8 @@ func typecheck1(n *Node, top int) (res *Node) {
// no defaultlit for left
// the outer context gives the type
n.Type = l.Type
- if (l.Type == types.Idealfloat || l.Type == types.Idealcomplex) && r.Op == OLITERAL {
- n.Type = types.Idealint
+ if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == OLITERAL {
+ n.Type = types.UntypedInt
}
break
@@ -674,8 +674,8 @@ func typecheck1(n *Node, top int) (res *Node) {
// The conversion allocates, so only do it if the concrete type is huge.
converted := false
if r.Type.Etype != TBLANK {
- aop = assignop(l.Type, r.Type, nil)
- if aop != 0 {
+ aop, _ = assignop(l.Type, r.Type)
+ if aop != OXXX {
if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
n.Type = nil
@@ -696,8 +696,8 @@ func typecheck1(n *Node, top int) (res *Node) {
}
if !converted && l.Type.Etype != TBLANK {
- aop = assignop(r.Type, l.Type, nil)
- if aop != 0 {
+ aop, _ = assignop(r.Type, l.Type)
+ if aop != OXXX {
if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
n.Type = nil
@@ -777,7 +777,7 @@ func typecheck1(n *Node, top int) (res *Node) {
if iscmp[n.Op] {
evconst(n)
- t = types.Idealbool
+ t = types.UntypedBool
if n.Op != OLITERAL {
l, r = defaultlit2(l, r, true)
n.Left = l
@@ -1046,13 +1046,13 @@ func typecheck1(n *Node, top int) (res *Node) {
}
if !n.Bounded() && Isconst(n.Right, CTINT) {
- x := n.Right.Int64()
+ x := n.Right.Int64Val()
if x < 0 {
yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
} else if t.IsArray() && x >= t.NumElem() {
yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem())
- } else if Isconst(n.Left, CTSTR) && x >= int64(len(strlit(n.Left))) {
- yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(strlit(n.Left)))
+ } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.StringVal())) {
+ yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal()))
} else if n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
yyerror("invalid %s index %v (index too large)", why, n.Right)
}
@@ -1148,11 +1148,11 @@ func typecheck1(n *Node, top int) (res *Node) {
l = defaultlit(l, types.Types[TINT])
c = defaultlit(c, types.Types[TINT])
- if Isconst(l, CTINT) && l.Int64() < 0 {
+ if Isconst(l, CTINT) && l.Int64Val() < 0 {
Fatalf("len for OSLICEHEADER must be non-negative")
}
- if Isconst(c, CTINT) && c.Int64() < 0 {
+ if Isconst(c, CTINT) && c.Int64Val() < 0 {
Fatalf("cap for OSLICEHEADER must be non-negative")
}
@@ -1201,7 +1201,7 @@ func typecheck1(n *Node, top int) (res *Node) {
if n.Left.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
Fatalf("len for OMAKESLICECOPY too large")
}
- if n.Left.Int64() < 0 {
+ if n.Left.Int64Val() < 0 {
Fatalf("len for OMAKESLICECOPY must be non-negative")
}
}
@@ -1458,7 +1458,7 @@ func typecheck1(n *Node, top int) (res *Node) {
// Determine result type.
switch t.Etype {
case TIDEAL:
- n.Type = types.Idealfloat
+ n.Type = types.UntypedFloat
case TCOMPLEX64:
n.Type = types.Types[TFLOAT32]
case TCOMPLEX128:
@@ -1504,7 +1504,7 @@ func typecheck1(n *Node, top int) (res *Node) {
return n
case TIDEAL:
- t = types.Idealcomplex
+ t = types.UntypedComplex
case TFLOAT32:
t = types.Types[TCOMPLEX64]
@@ -1691,7 +1691,7 @@ func typecheck1(n *Node, top int) (res *Node) {
return n
}
var why string
- n.Op = convertop(n.Left.Op == OLITERAL, t, n.Type, &why)
+ n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type)
if n.Op == OXXX {
if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
@@ -2187,14 +2187,14 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool {
}
if r.Op == OLITERAL {
- if r.Int64() < 0 {
+ if r.Int64Val() < 0 {
yyerror("invalid slice index %v (index must be non-negative)", r)
return false
- } else if tp != nil && tp.NumElem() >= 0 && r.Int64() > tp.NumElem() {
+ } else if tp != nil && tp.NumElem() >= 0 && r.Int64Val() > tp.NumElem() {
yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
return false
- } else if Isconst(l, CTSTR) && r.Int64() > int64(len(strlit(l))) {
- yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(strlit(l)))
+ } else if Isconst(l, CTSTR) && r.Int64Val() > int64(len(l.StringVal())) {
+ yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal()))
return false
} else if r.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
yyerror("invalid slice index %v (index too large)", r)
@@ -2516,7 +2516,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
n.Left = nod(OADDR, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, ctxType|ctxExpr)
- } else if tt.IsPtr() && !rcvr.IsPtr() && types.Identical(tt.Elem(), rcvr) {
+ } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
n.Left = nod(ODEREF, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, ctxType|ctxExpr)
@@ -2724,9 +2724,9 @@ func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
// e.g in error messages about wrong arguments to return.
func sigrepr(t *types.Type, isddd bool) string {
switch t {
- case types.Idealstring:
+ case types.UntypedString:
return "string"
- case types.Idealbool:
+ case types.UntypedBool:
return "bool"
}
@@ -3267,9 +3267,7 @@ func typecheckas(n *Node) {
}
func checkassignto(src *types.Type, dst *Node) {
- var why string
-
- if assignop(src, dst.Type, &why) == 0 {
+ if op, why := assignop(src, dst.Type); op == OXXX {
yyerror("cannot assign %v to %L in multiple assignment%s", src, dst, why)
return
}
@@ -3450,9 +3448,8 @@ func stringtoruneslit(n *Node) *Node {
}
var l []*Node
- s := strlit(n.Left)
i := 0
- for _, r := range s {
+ for _, r := range n.Left.StringVal() {
l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
i++
}
@@ -3507,7 +3504,7 @@ func setUnderlying(t, underlying *types.Type) {
}
// Propagate go:notinheap pragma from the Name to the Type.
- if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma&NotInHeap != 0 {
+ if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 {
t.SetNotInHeap(true)
}
@@ -3679,7 +3676,7 @@ func typecheckdef(n *Node) {
n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
case OTYPE:
- if p := n.Name.Param; p.Alias {
+ if p := n.Name.Param; p.Alias() {
// Type alias declaration: Simply use the rhs type - no need
// to create a new type.
// If we have a syntax error, p.Ntype may be nil.
@@ -3904,7 +3901,7 @@ func deadcodefn(fn *Node) {
return
}
case OFOR:
- if !Isconst(n.Left, CTBOOL) || n.Left.Bool() {
+ if !Isconst(n.Left, CTBOOL) || n.Left.BoolVal() {
return
}
default:
@@ -3934,7 +3931,7 @@ func deadcodeslice(nn Nodes) {
n.Left = deadcodeexpr(n.Left)
if Isconst(n.Left, CTBOOL) {
var body Nodes
- if n.Left.Bool() {
+ if n.Left.BoolVal() {
n.Rlist = Nodes{}
body = n.Nbody
} else {
@@ -3977,7 +3974,7 @@ func deadcodeexpr(n *Node) *Node {
n.Left = deadcodeexpr(n.Left)
n.Right = deadcodeexpr(n.Right)
if Isconst(n.Left, CTBOOL) {
- if n.Left.Bool() {
+ if n.Left.BoolVal() {
return n.Right // true && x => x
} else {
return n.Left // false && x => false
@@ -3987,7 +3984,7 @@ func deadcodeexpr(n *Node) *Node {
n.Left = deadcodeexpr(n.Left)
n.Right = deadcodeexpr(n.Right)
if Isconst(n.Left, CTBOOL) {
- if n.Left.Bool() {
+ if n.Left.BoolVal() {
return n.Left // true || x => true
} else {
return n.Right // false || x => x
diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go
index 04861c8dd4..ff8cabd8e3 100644
--- a/src/cmd/compile/internal/gc/universe.go
+++ b/src/cmd/compile/internal/gc/universe.go
@@ -123,21 +123,21 @@ func lexinit() {
asNode(s2.Def).SetSubOp(s.op)
}
- types.Idealstring = types.New(TSTRING)
- types.Idealbool = types.New(TBOOL)
+ types.UntypedString = types.New(TSTRING)
+ types.UntypedBool = types.New(TBOOL)
types.Types[TANY] = types.New(TANY)
s := builtinpkg.Lookup("true")
s.Def = asTypesNode(nodbool(true))
asNode(s.Def).Sym = lookup("true")
asNode(s.Def).Name = new(Name)
- asNode(s.Def).Type = types.Idealbool
+ asNode(s.Def).Type = types.UntypedBool
s = builtinpkg.Lookup("false")
s.Def = asTypesNode(nodbool(false))
asNode(s.Def).Sym = lookup("false")
asNode(s.Def).Name = new(Name)
- asNode(s.Def).Type = types.Idealbool
+ asNode(s.Def).Type = types.UntypedBool
s = lookup("_")
s.Block = -100
@@ -351,7 +351,7 @@ func typeinit() {
sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
dowidth(types.Types[TSTRING])
- dowidth(types.Idealstring)
+ dowidth(types.UntypedString)
}
func makeErrorInterface() *types.Type {
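The Idealbool/Idealstring to UntypedBool/UntypedString rename matches the spec's term for these constants. The behavior they model: a comparison yields an untyped boolean that defaults to bool on assignment but can be given any boolean type directly:

    package main

    import "fmt"

    type myBool bool

    func main() {
        x := 1 < 2           // untyped bool result, defaults to bool
        var y myBool = 1 < 2 // same untyped bool, typed as myBool
        fmt.Printf("%T %T\n", x, y) // bool main.myBool
    }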
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 8e45059eab..b453e9f1d9 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -21,7 +21,7 @@ const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
func walk(fn *Node) {
Curfn = fn
- if Debug['W'] != 0 {
+ if Debug.W != 0 {
s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Nbody)
}
@@ -63,14 +63,14 @@ func walk(fn *Node) {
return
}
walkstmtlist(Curfn.Nbody.Slice())
- if Debug['W'] != 0 {
+ if Debug.W != 0 {
s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Nbody)
}
zeroResults()
heapmoves()
- if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 {
+ if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 {
s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Func.Enter)
}
@@ -336,19 +336,6 @@ func walkstmt(n *Node) *Node {
return n
}
-func isSmallMakeSlice(n *Node) bool {
- if n.Op != OMAKESLICE {
- return false
- }
- r := n.Right
- if r == nil {
- r = n.Left
- }
- t := n.Type
-
- return smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < maxImplicitStackVarSize/t.Elem().Width)
-}
-
// walk the whole tree of the body of an
// expression or simple statement.
// the types expressions are calculated.
@@ -449,7 +436,7 @@ func walkexpr(n *Node, init *Nodes) *Node {
lno := setlineno(n)
- if Debug['w'] > 1 {
+ if Debug.w > 1 {
Dump("before walk expr", n)
}
@@ -487,7 +474,7 @@ opswitch:
ODEREF, OSPTR, OITAB, OIDATA, OADDR:
n.Left = walkexpr(n.Left, init)
- case OEFACE, OAND, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
+ case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@@ -978,14 +965,6 @@ opswitch:
fn := basicnames[param] + "to" + basicnames[result]
n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
- case OANDNOT:
- n.Left = walkexpr(n.Left, init)
- n.Op = OAND
- n.SetImplicit(true) // for walkCheckPtrArithmetic
- n.Right = nod(OBITNOT, n.Right, nil)
- n.Right = typecheck(n.Right, ctxExpr)
- n.Right = walkexpr(n.Right, init)
-
case ODIV, OMOD:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@@ -1014,7 +993,7 @@ opswitch:
// The SSA backend will handle those.
switch et {
case TINT64:
- c := n.Right.Int64()
+ c := n.Right.Int64Val()
if c < 0 {
c = -c
}
@@ -1022,7 +1001,7 @@ opswitch:
break opswitch
}
case TUINT64:
- c := uint64(n.Right.Int64())
+ c := uint64(n.Right.Int64Val())
if c != 0 && c&(c-1) == 0 {
break opswitch
}
@@ -1062,15 +1041,15 @@ opswitch:
}
if t.IsArray() {
n.SetBounded(bounded(r, t.NumElem()))
- if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
+ if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
if smallintconst(n.Right) && !n.Bounded() {
yyerror("index out of bounds")
}
} else if Isconst(n.Left, CTSTR) {
- n.SetBounded(bounded(r, int64(len(strlit(n.Left)))))
- if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
+ n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
+ if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
if smallintconst(n.Right) && !n.Bounded() {
@@ -1339,8 +1318,8 @@ opswitch:
yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
if n.Esc == EscNone {
- if !isSmallMakeSlice(n) {
- Fatalf("non-small OMAKESLICE with EscNone: %v", n)
+ if why := heapAllocReason(n); why != "" {
+ Fatalf("%v has EscNone, but %v", n, why)
}
// var arr [r]T
// n = arr[:l]
@@ -1504,7 +1483,7 @@ opswitch:
case OSTR2BYTES:
s := n.Left
if Isconst(s, CTSTR) {
- sc := strlit(s)
+ sc := s.StringVal()
// Allocate a [n]byte of the right size.
t := types.NewArray(types.Types[TUINT8], int64(len(sc)))
@@ -1612,7 +1591,7 @@ opswitch:
updateHasCall(n)
- if Debug['w'] != 0 && n != nil {
+ if Debug.w != 0 && n != nil {
Dump("after walk expr", n)
}
@@ -1932,7 +1911,7 @@ func walkprint(nn *Node, init *Nodes) *Node {
for i := 0; i < len(s); {
var strs []string
for i < len(s) && Isconst(s[i], CTSTR) {
- strs = append(strs, strlit(s[i]))
+ strs = append(strs, s[i].StringVal())
i++
}
if len(strs) > 0 {
@@ -1978,7 +1957,17 @@ func walkprint(nn *Node, init *Nodes) *Node {
on = syslook("printiface")
}
on = substArgTypes(on, n.Type) // any-1
- case TPTR, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
+ case TPTR:
+ if n.Type.Elem().NotInHeap() {
+ on = syslook("printuintptr")
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TUNSAFEPTR]
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TUINTPTR]
+ break
+ }
+ fallthrough
+ case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
on = syslook("printpointer")
on = substArgTypes(on, n.Type) // any-1
case TSLICE:
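Pointers to notinheap element types can't be handed to printpointer, which expects a value the runtime may treat as a heap pointer, so the walk above rewrites the operand as pointer → unsafe.Pointer → uintptr and prints the integer instead. The same double conversion written by hand:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        p := new(int)
        // A pointer must pass through unsafe.Pointer to become a uintptr.
        u := uintptr(unsafe.Pointer(p))
        fmt.Printf("%#x\n", u)
    }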
@@ -2001,7 +1990,7 @@ func walkprint(nn *Node, init *Nodes) *Node {
case TSTRING:
cs := ""
if Isconst(n, CTSTR) {
- cs = strlit(n)
+ cs = n.StringVal()
}
switch cs {
case " ":
@@ -2170,7 +2159,7 @@ func reorder3(all []*Node) []*Node {
// The result of reorder3save MUST be assigned back to n, e.g.
// n.Left = reorder3save(n.Left, all, i, early)
func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
- if !aliased(n, all, i) {
+ if !aliased(n, all[:i]) {
return n
}
@@ -2202,73 +2191,75 @@ func outervalue(n *Node) *Node {
}
}
-// Is it possible that the computation of n might be
-// affected by writes in as up to but not including the ith element?
-func aliased(n *Node, all []*Node, i int) bool {
- if n == nil {
+// Is it possible that the computation of r might be
+// affected by assignments in all?
+func aliased(r *Node, all []*Node) bool {
+ if r == nil {
return false
}
// Treat all fields of a struct as referring to the whole struct.
// We could do better but we would have to keep track of the fields.
- for n.Op == ODOT {
- n = n.Left
+ for r.Op == ODOT {
+ r = r.Left
}
// Look for obvious aliasing: a variable being assigned
// during the all list and appearing in n.
- // Also record whether there are any writes to main memory.
- // Also record whether there are any writes to variables
- // whose addresses have been taken.
+ // Also record whether there are any writes to addressable
+ // memory (either main memory or variables whose addresses
+ // have been taken).
memwrite := false
- varwrite := false
- for _, an := range all[:i] {
- a := outervalue(an.Left)
-
- for a.Op == ODOT {
- a = a.Left
+ for _, as := range all {
+ // We can ignore assignments to blank.
+ if as.Left.isBlank() {
+ continue
}
- if a.Op != ONAME {
+ l := outervalue(as.Left)
+ if l.Op != ONAME {
memwrite = true
continue
}
- switch n.Class() {
+ switch l.Class() {
default:
- varwrite = true
+ Fatalf("unexpected class: %v, %v", l, l.Class())
+
+ case PAUTOHEAP, PEXTERN:
+ memwrite = true
continue
case PAUTO, PPARAM, PPARAMOUT:
- if n.Name.Addrtaken() {
- varwrite = true
+ if l.Name.Addrtaken() {
+ memwrite = true
continue
}
- if vmatch2(a, n) {
- // Direct hit.
+ if vmatch2(l, r) {
+ // Direct hit: l appears in r.
return true
}
}
}
- // The variables being written do not appear in n.
- // However, n might refer to computed addresses
+ // The variables being written do not appear in r.
+ // However, r might refer to computed addresses
// that are being written.
// If no computed addresses are affected by the writes, no aliasing.
- if !memwrite && !varwrite {
+ if !memwrite {
return false
}
- // If n does not refer to computed addresses
- // (that is, if n only refers to variables whose addresses
+ // If r does not refer to computed addresses
+ // (that is, if r only refers to variables whose addresses
// have not been taken), no aliasing.
- if varexpr(n) {
+ if varexpr(r) {
return false
}
- // Otherwise, both the writes and n refer to computed memory addresses.
+ // Otherwise, both the writes and r refer to computed memory addresses.
// Assume that they might conflict.
return true
}
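The rewritten aliased receives the assignment prefix directly (the call site passes all[:i]), skips writes to blank, and merges the old memwrite/varwrite flags into one. The hazard it guards against shows up in plain Go: in a multiple assignment all right-hand sides are evaluated before any assignment happens, so an earlier store must not clobber a later operand:

    package main

    import "fmt"

    func main() {
        x := 1
        p := &x
        // RHS operands (10 and *p) are evaluated first, so *p reads
        // the old x even though *p is assigned before x.
        *p, x = 10, *p
        fmt.Println(x) // 1
    }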
@@ -2656,7 +2647,7 @@ func addstr(n *Node, init *Nodes) *Node {
sz := int64(0)
for _, n1 := range n.List.Slice() {
if n1.Op == OLITERAL {
- sz += int64(len(strlit(n1)))
+ sz += int64(len(n1.StringVal()))
}
}
@@ -2830,7 +2821,7 @@ func appendslice(n *Node, init *Nodes) *Node {
// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
func isAppendOfMake(n *Node) bool {
- if Debug['N'] != 0 || instrumenting {
+ if Debug.N != 0 || instrumenting {
return false
}
@@ -3450,7 +3441,7 @@ func walkcompare(n *Node, init *Nodes) *Node {
func tracecmpArg(n *Node, t *types.Type, init *Nodes) *Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
- if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64() < 0 {
+ if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 {
n = copyexpr(n, n.Type, init)
}
@@ -3520,7 +3511,7 @@ func walkcompareString(n *Node, init *Nodes) *Node {
// Length-only checks are ok, though.
maxRewriteLen = 0
}
- if s := strlit(cs); len(s) <= maxRewriteLen {
+ if s := cs.StringVal(); len(s) <= maxRewriteLen {
if len(s) > 0 {
ncs = safeexpr(ncs, init)
}
@@ -3615,26 +3606,32 @@ func bounded(n *Node, max int64) bool {
bits := int32(8 * n.Type.Width)
if smallintconst(n) {
- v := n.Int64()
+ v := n.Int64Val()
return 0 <= v && v < max
}
switch n.Op {
- case OAND:
+ case OAND, OANDNOT:
v := int64(-1)
- if smallintconst(n.Left) {
- v = n.Left.Int64()
- } else if smallintconst(n.Right) {
- v = n.Right.Int64()
+ switch {
+ case smallintconst(n.Left):
+ v = n.Left.Int64Val()
+ case smallintconst(n.Right):
+ v = n.Right.Int64Val()
+ if n.Op == OANDNOT {
+ v = ^v
+ if !sign {
+ v &= 1<<uint(bits) - 1
+ }
+ }
}
-
if 0 <= v && v < max {
return true
}
case OMOD:
if !sign && smallintconst(n.Right) {
- v := n.Right.Int64()
+ v := n.Right.Int64Val()
if 0 <= v && v <= max {
return true
}
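bounded now handles x &^ c with a constant right operand: x &^ c equals x & ^c, so the effective mask is the complement of c (truncated to the operand's width when unsigned), and the result can never exceed that mask. Concretely:

    package main

    import "fmt"

    func main() {
        var x uint8 = 0xAB
        fmt.Println(x&^0xF0 == x&0x0F) // true: &^ is AND with ^0xF0
        fmt.Println(x &^ 0xF0)         // 11, provably < 16
    }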
@@ -3642,7 +3639,7 @@ func bounded(n *Node, max int64) bool {
case ODIV:
if !sign && smallintconst(n.Right) {
- v := n.Right.Int64()
+ v := n.Right.Int64Val()
for bits > 0 && v >= 2 {
bits--
v >>= 1
@@ -3651,7 +3648,7 @@ func bounded(n *Node, max int64) bool {
case ORSH:
if !sign && smallintconst(n.Right) {
- v := n.Right.Int64()
+ v := n.Right.Int64Val()
if v > int64(bits) {
return true
}
@@ -3892,6 +3889,16 @@ func wrapCall(n *Node, init *Nodes) *Node {
}
isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
+
+ // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
+ if !isBuiltinCall && n.IsDDD() {
+ last := n.List.Len() - 1
+ if va := n.List.Index(last); va.Op == OSLICELIT {
+ n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...))
+ n.SetIsDDD(false)
+ }
+ }
+
// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
origArgs := make([]*Node, n.List.Len())
t := nod(OTFUNC, nil, nil)
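wrapCall now unpacks the OSLICELIT that typechecking synthesized for a variadic call, restoring the caller's original argument list before wrapping. That rewrite is sound because the two call forms are equivalent at the language level:

    package main

    import "fmt"

    func sum(base int, xs ...int) int {
        for _, x := range xs {
            base += x
        }
        return base
    }

    func main() {
        fmt.Println(sum(1, 2, 3))           // 6
        fmt.Println(sum(1, []int{2, 3}...)) // 6: same call, flattened
    }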
@@ -3977,7 +3984,7 @@ func canMergeLoads() bool {
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
func isRuneCount(n *Node) bool {
- return Debug['N'] == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
+ return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
}
func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
@@ -4046,12 +4053,8 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
case OADD:
walk(n.Left)
walk(n.Right)
- case OSUB:
+ case OSUB, OANDNOT:
walk(n.Left)
- case OAND:
- if n.Implicit() { // was OANDNOT
- walk(n.Left)
- }
case OCONVNOP:
if n.Left.Type.IsUnsafePtr() {
n.Left = cheapexpr(n.Left, init)
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
index b57a07f12c..fca85c10fb 100644
--- a/src/cmd/compile/internal/logopt/logopt_test.go
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -208,12 +208,11 @@ func s15a8(x *[15]int64) [15]int64 {
`"relatedInformation":[{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"}]}`)
want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
- want(t, slogged, `{"range":{"start":{"line":21,"character":21},"end":{"line":21,"character":21}},"severity":3,"code":"cannotInlineCall","source":"go compiler","message":"foo cannot be inlined (escaping closure variable)"}`)
// escape analysis explanation
want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r2 with derefs=0",`+
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y = \u003cN\u003e (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~r1 = y:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y.b (dot of pointer)"},`+
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index a5fbdaffba..3888aa6527 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -166,34 +166,46 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.To.Reg = v.Reg1()
case ssa.OpPPC64LoweredAtomicAnd8,
- ssa.OpPPC64LoweredAtomicOr8:
+ ssa.OpPPC64LoweredAtomicAnd32,
+ ssa.OpPPC64LoweredAtomicOr8,
+ ssa.OpPPC64LoweredAtomicOr32:
// LWSYNC
- // LBAR (Rarg0), Rtmp
+ // LBAR/LWAR (Rarg0), Rtmp
// AND/OR Rarg1, Rtmp
- // STBCCC Rtmp, (Rarg0)
+ // STBCCC/STWCCC Rtmp, (Rarg0)
// BNE -3(PC)
+ ld := ppc64.ALBAR
+ st := ppc64.ASTBCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicAnd32 || v.Op == ssa.OpPPC64LoweredAtomicOr32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
// LWSYNC - Assuming shared data not write-through-required nor
// caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
plwsync := s.Prog(ppc64.ALWSYNC)
plwsync.To.Type = obj.TYPE_NONE
- p := s.Prog(ppc64.ALBAR)
+ // LBAR or LWAR
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
+ // AND/OR reg1,out
p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = ppc64.REGTMP
- p2 := s.Prog(ppc64.ASTBCCC)
+ // STBCCC or STWCCC
+ p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = ppc64.REGTMP
+ // BNE retry
p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
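The lowering extends the byte-wide load-reserve/store-conditional AND/OR loop to 32-bit LWAR/STWCCC variants. On targets without LL/SC the same primitive is a compare-and-swap retry loop; a user-level equivalent with the same retry-until-stored shape:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // and32 atomically clears the bits of *addr not set in mask,
    // mirroring the LWAR; AND; STWCCC; BNE sequence.
    func and32(addr *uint32, mask uint32) {
        for {
            old := atomic.LoadUint32(addr)
            if atomic.CompareAndSwapUint32(addr, old, old&mask) {
                return
            }
        }
    }

    func main() {
        var v uint32 = 0xFF
        and32(&v, 0x0F)
        fmt.Printf("%#x\n", v) // 0xf
    }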
@@ -570,9 +582,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r1 := v.Args[0].Reg()
shifts := v.AuxInt
p := s.Prog(v.Op.Asm())
- // clrlslwi ra,rs,sh,mb will become rlwinm ra,rs,sh,mb-sh,31-n as described in ISA
- p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
- p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)})
+ // clrlslwi ra,rs,mb,sh will become rlwinm ra,rs,sh,mb-sh,31-sh as described in ISA
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)})
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
@@ -582,9 +594,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r1 := v.Args[0].Reg()
shifts := v.AuxInt
p := s.Prog(v.Op.Asm())
- // clrlsldi ra,rs,sh,mb will become rldic ra,rs,sh,mb-sh
- p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
- p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)})
+ // clrlsldi ra,rs,mb,sh will become rldic ra,rs,sh,mb-sh
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)})
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
@@ -637,6 +649,24 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ // Auxint holds encoded rotate + mask
+ case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI:
+ rot, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(rot)}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)})
+
+ // Auxint holds mask
+ case ssa.OpPPC64RLWNM:
+ _, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)})
+
case ssa.OpPPC64MADDLD:
r := v.Reg()
r1 := v.Args[0].Reg()
@@ -677,7 +707,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[0].Reg()
case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
- ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst:
+ ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst,
+ ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst:
p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST
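The new RLWINM/RLWMI/RLWNM cases read a rotate amount and mask that the rewrite rules packed into the value's 64-bit AuxInt; the real decoder is ssa.DecodePPC64RotateMask, shown in the hunk. A sketch of that kind of multi-field packing (the field layout here is invented for illustration, not the compiler's actual encoding):

    package main

    import "fmt"

    func encode(rotate, mb, me int64) int64 {
        return rotate<<16 | mb<<8 | me
    }

    func decode(aux int64) (rotate, mb, me int64) {
        return aux >> 16 & 0xFF, aux >> 8 & 0xFF, aux & 0xFF
    }

    func main() {
        aux := encode(3, 0, 28)
        r, mb, me := decode(aux)
        fmt.Println(r, mb, me) // 3 0 28
    }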
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go
index be31fad441..f7c03fe7c2 100644
--- a/src/cmd/compile/internal/riscv64/ggen.go
+++ b/src/cmd/compile/internal/riscv64/ggen.go
@@ -25,7 +25,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
return p
}
- // TODO(jsing): Add a duff zero implementation for medium sized ranges.
+ if cnt <= int64(128*gc.Widthptr) {
+ p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
+ p.Reg = riscv.REG_SP
+ p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
+ return p
+ }
// Loop, zeroing pointer width bytes at a time.
// ADD $(off), SP, T0
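The medium-range case enters duffzero partway through: the routine is 128 unrolled store-and-advance steps, and jumping past the first 128 - cnt/Widthptr of them leaves exactly the right number of stores to run. The offset arithmetic, with the 8 in the diff read as code bytes per unrolled step (an assumption about the riscv64 duffzero encoding):

    package main

    import "fmt"

    const (
        widthPtr   = 8   // pointer width in bytes on riscv64
        stepBytes  = 8   // assumed code bytes per unrolled store step
        totalSteps = 128 // unrolled steps in duffzero
    )

    func duffzeroOffset(cnt int64) int64 {
        words := cnt / widthPtr
        return stepBytes * (totalSteps - words)
    }

    func main() {
        fmt.Println(duffzeroOffset(64))   // 960: run only the last 8 steps
        fmt.Println(duffzeroOffset(1024)) // 0: run all 128 steps
    }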
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 73f0dbc195..0beb5b4bd1 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -190,7 +190,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// input args need no code
case ssa.OpPhi:
gc.CheckLoweredPhi(v)
- case ssa.OpCopy, ssa.OpRISCV64MOVconvert:
+ case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
if v.Type.IsMemory() {
return
}
@@ -208,6 +208,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = rs
p.To.Type = obj.TYPE_REG
p.To.Reg = rd
+ case ssa.OpRISCV64MOVDnop:
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // nothing to do
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString())
@@ -228,6 +233,37 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddrAuto(&p.To, v)
case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// nothing to do
+ case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
+ ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
+ a = a.Args[0]
+ }
+ as := v.Op.Asm()
+ rs := v.Args[0].Reg()
+ rd := v.Reg()
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load and already sign/zero-extended
+ if rs == rd {
+ return
+ }
+ as = riscv.AMOV
+ default:
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rs
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rd
case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
@@ -572,6 +608,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.OpRISCV64DUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpRISCV64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffcopy
+ p.To.Offset = v.AuxInt
+
default:
v.Fatalf("Unhandled op %v", v.Op)
}
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index 00d253c95a..e23b31f385 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -761,6 +761,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
+ case ssa.OpS390XLAN, ssa.OpS390XLAO:
+ // LA(N|O) Ry, TMP, 0(Rx)
+ op := s.Prog(v.Op.Asm())
+ op.From.Type = obj.TYPE_REG
+ op.From.Reg = v.Args[1].Reg()
+ op.Reg = s390x.REGTMP
+ op.To.Type = obj.TYPE_MEM
+ op.To.Reg = v.Args[0].Reg()
case ssa.OpS390XLANfloor, ssa.OpS390XLAOfloor:
r := v.Args[0].Reg() // clobbered, assumed R1 in comments
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
index aae0def27f..1baf143869 100644
--- a/src/cmd/compile/internal/ssa/addressingmodes.go
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -59,22 +59,22 @@ func addressingModes(f *Func) {
v.AuxInt += p.AuxInt
case [2]auxType{auxSymValAndOff, auxInt32}:
vo := ValAndOff(v.AuxInt)
- if !vo.canAdd(p.AuxInt) {
+ if !vo.canAdd64(p.AuxInt) {
continue
}
- v.AuxInt = vo.add(p.AuxInt)
+ v.AuxInt = int64(vo.addOffset64(p.AuxInt))
case [2]auxType{auxSymValAndOff, auxSymOff}:
vo := ValAndOff(v.AuxInt)
if v.Aux != nil && p.Aux != nil {
continue
}
- if !vo.canAdd(p.AuxInt) {
+ if !vo.canAdd64(p.AuxInt) {
continue
}
if p.Aux != nil {
v.Aux = p.Aux
}
- v.AuxInt = vo.add(p.AuxInt)
+ v.AuxInt = int64(vo.addOffset64(p.AuxInt))
case [2]auxType{auxSymOff, auxNone}:
// nothing to do
case [2]auxType{auxSymValAndOff, auxNone}:
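A ValAndOff packs a value and an offset into one AuxInt, 32 bits each, so folding an addressing-mode offset must first check that the sum still fits; that is the canAdd64/addOffset64 pair used above. A sketch with the assumed layout (value in the high half, offset in the low half):

    package main

    import (
        "fmt"
        "math"
    )

    type valAndOff int64

    func (x valAndOff) val() int32 { return int32(int64(x) >> 32) }
    func (x valAndOff) off() int32 { return int32(int64(x)) }

    // canAdd64 reports whether off can be folded without overflowing
    // the 32-bit offset field.
    func (x valAndOff) canAdd64(off int64) bool {
        n := int64(x.off()) + off
        return n >= math.MinInt32 && n <= math.MaxInt32
    }

    func (x valAndOff) addOffset64(off int64) valAndOff {
        n := int64(x.off()) + off // caller must have checked canAdd64
        return valAndOff(int64(x.val())<<32 | int64(uint32(int32(n))))
    }

    func main() {
        v := valAndOff(int64(7)<<32 | int64(uint32(100)))
        fmt.Println(v.val(), v.off())          // 7 100
        fmt.Println(v.canAdd64(math.MaxInt32)) // false: would overflow
        w := v.addOffset64(28)
        fmt.Println(w.val(), w.off())          // 7 128
    }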
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index 4f9fd8e22e..1d34f8160b 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -35,7 +35,7 @@ func branchelim(f *Func) {
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
- case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32:
+ case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32, OpAtomicLoadAcq64:
loadAddr.add(v.Args[0].ID)
case OpMove:
loadAddr.add(v.Args[1].ID)
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 4eed612977..bddd271273 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -47,6 +47,9 @@ func Compile(f *Func) {
stack := make([]byte, 16384)
n := runtime.Stack(stack, false)
stack = stack[:n]
+ if f.HTMLWriter != nil {
+ f.HTMLWriter.flushPhases()
+ }
f.Fatalf("panic during %s while compiling %s:\n\n%v\n\n%s\n", phaseName, f.Name, err, stack)
}
}()
@@ -201,6 +204,13 @@ func (p *pass) addDump(s string) {
p.dump[s] = true
}
+func (p *pass) String() string {
+ if p == nil {
+ return "nil pass"
+ }
+ return p.name
+}
+
// Run consistency checker between each phase
var (
checkEnabled = false
@@ -432,8 +442,8 @@ var passes = [...]pass{
{name: "prove", fn: prove},
{name: "early fuse", fn: fuseEarly},
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
+ {name: "expand calls", fn: expandCalls, required: true},
{name: "softfloat", fn: softfloat, required: true},
- {name: "expand calls", fn:expandCalls, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
{name: "dead auto elim", fn: elimDeadAutosGeneric},
{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 7f01f8047f..cb6f6fe7a1 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -38,7 +38,6 @@ type Config struct {
useSSE bool // Use SSE for non-float operations
useAvg bool // Use optimizations that need Avg* operations
useHmul bool // Use optimizations that need Hmul* operations
- use387 bool // GO386=387
SoftFloat bool //
Race bool // race detector enabled
NeedsFpScratch bool // No direct move between GP and FP register sets
@@ -150,6 +149,7 @@ type Frontend interface {
SplitStruct(LocalSlot, int) LocalSlot
SplitArray(LocalSlot) LocalSlot // array must be length 1
SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo)
+ SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
// DerefItab dereferences an itab function
// entry, given the symbol of the itab and
@@ -196,6 +196,14 @@ const (
ClassParamOut // return value
)
+const go116lateCallExpansion = true
+
+// LateCallExpansionEnabledWithin returns true if late call expansion should be tested
+// within compilation of a function/method triggered by GOSSAHASH (defaults to "yes").
+func LateCallExpansionEnabledWithin(f *Func) bool {
+ return go116lateCallExpansion && f.DebugTest // Currently set up for GOSSAHASH bug searches
+}
+
// NewConfig returns a new configuration object for the given architecture.
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
c := &Config{arch: arch, Types: types}
@@ -379,9 +387,4 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
return c
}
-func (c *Config) Set387(b bool) {
- c.NeedsFpScratch = b
- c.use387 = b
-}
-
func (c *Config) Ctxt() *obj.Link { return c.ctxt }
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
index ab27ba85ae..bf7f1e826b 100644
--- a/src/cmd/compile/internal/ssa/decompose.go
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -6,6 +6,7 @@ package ssa
import (
"cmd/compile/internal/types"
+ "sort"
)
// decompose converts phi ops on compound builtin types into phi
@@ -31,77 +32,79 @@ func decomposeBuiltIn(f *Func) {
}
// Split up named values into their components.
+ // accumulate old names for aggregates (that are decomposed) in toDelete for efficient bulk deletion,
+ // accumulate new LocalSlots in newNames for addition after the iteration. This decomposition is for
+ // builtin types with leaf components, and thus there is no need to reprocess the newly created LocalSlots.
+ var toDelete []namedVal
var newNames []LocalSlot
- for _, name := range f.Names {
+ for i, name := range f.Names {
t := name.Type
switch {
case t.IsInteger() && t.Size() > f.Config.RegSize:
hiName, loName := f.fe.SplitInt64(name)
newNames = append(newNames, hiName, loName)
- for _, v := range f.NamedValues[name] {
+ for j, v := range f.NamedValues[name] {
if v.Op != OpInt64Make {
continue
}
f.NamedValues[hiName] = append(f.NamedValues[hiName], v.Args[0])
f.NamedValues[loName] = append(f.NamedValues[loName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
}
- delete(f.NamedValues, name)
case t.IsComplex():
rName, iName := f.fe.SplitComplex(name)
newNames = append(newNames, rName, iName)
- for _, v := range f.NamedValues[name] {
+ for j, v := range f.NamedValues[name] {
if v.Op != OpComplexMake {
continue
}
f.NamedValues[rName] = append(f.NamedValues[rName], v.Args[0])
f.NamedValues[iName] = append(f.NamedValues[iName], v.Args[1])
-
+ toDelete = append(toDelete, namedVal{i, j})
}
- delete(f.NamedValues, name)
case t.IsString():
ptrName, lenName := f.fe.SplitString(name)
newNames = append(newNames, ptrName, lenName)
- for _, v := range f.NamedValues[name] {
+ for j, v := range f.NamedValues[name] {
if v.Op != OpStringMake {
continue
}
f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0])
f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
}
- delete(f.NamedValues, name)
case t.IsSlice():
ptrName, lenName, capName := f.fe.SplitSlice(name)
newNames = append(newNames, ptrName, lenName, capName)
- for _, v := range f.NamedValues[name] {
+ for j, v := range f.NamedValues[name] {
if v.Op != OpSliceMake {
continue
}
f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0])
f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1])
f.NamedValues[capName] = append(f.NamedValues[capName], v.Args[2])
+ toDelete = append(toDelete, namedVal{i, j})
}
- delete(f.NamedValues, name)
case t.IsInterface():
typeName, dataName := f.fe.SplitInterface(name)
newNames = append(newNames, typeName, dataName)
- for _, v := range f.NamedValues[name] {
+ for j, v := range f.NamedValues[name] {
if v.Op != OpIMake {
continue
}
f.NamedValues[typeName] = append(f.NamedValues[typeName], v.Args[0])
f.NamedValues[dataName] = append(f.NamedValues[dataName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
}
- delete(f.NamedValues, name)
case t.IsFloat():
// floats are never decomposed, even ones bigger than RegSize
- newNames = append(newNames, name)
case t.Size() > f.Config.RegSize:
f.Fatalf("undecomposed named type %s %v", name, t)
- default:
- newNames = append(newNames, name)
}
}
- f.Names = newNames
+
+ deleteNamedVals(f, toDelete)
+ f.Names = append(f.Names, newNames...)
}
func decomposeBuiltInPhi(v *Value) {
@@ -263,14 +266,20 @@ func decomposeUserArrayInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalS
f.Fatalf("array not of size 1")
}
elemName := f.fe.SplitArray(name)
+ var keep []*Value
for _, v := range f.NamedValues[name] {
if v.Op != OpArrayMake1 {
+ keep = append(keep, v)
continue
}
f.NamedValues[elemName] = append(f.NamedValues[elemName], v.Args[0])
}
- // delete the name for the array as a whole
- delete(f.NamedValues, name)
+ if len(keep) == 0 {
+ // delete the name for the array as a whole
+ delete(f.NamedValues, name)
+ } else {
+ f.NamedValues[name] = keep
+ }
if t.Elem().IsArray() {
return decomposeUserArrayInto(f, elemName, slots)
@@ -300,17 +309,23 @@ func decomposeUserStructInto(f *Func, name LocalSlot, slots []LocalSlot) []Local
}
makeOp := StructMakeOp(n)
+ var keep []*Value
// create named values for each struct field
for _, v := range f.NamedValues[name] {
if v.Op != makeOp {
+ keep = append(keep, v)
continue
}
for i := 0; i < len(fnames); i++ {
f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], v.Args[i])
}
}
- // remove the name of the struct as a whole
- delete(f.NamedValues, name)
+ if len(keep) == 0 {
+ // delete the name for the struct as a whole
+ delete(f.NamedValues, name)
+ } else {
+ f.NamedValues[name] = keep
+ }
// now that this f.NamedValues contains values for the struct
// fields, recurse into nested structs
@@ -400,3 +415,35 @@ func StructMakeOp(nf int) Op {
}
panic("too many fields in an SSAable struct")
}
+
+type namedVal struct {
+ locIndex, valIndex int // f.NamedValues[f.Names[locIndex]][valIndex] = key
+}
+
+// deleteNamedVals removes particular values with debugger names from f's naming data structures
+func deleteNamedVals(f *Func, toDelete []namedVal) {
+ // Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalidate pending indices.
+ sort.Slice(toDelete, func(i, j int) bool {
+ if toDelete[i].locIndex != toDelete[j].locIndex {
+ return toDelete[i].locIndex > toDelete[j].locIndex
+ }
+ return toDelete[i].valIndex > toDelete[j].valIndex
+
+ })
+
+ // Get rid of obsolete names
+ for _, d := range toDelete {
+ loc := f.Names[d.locIndex]
+ vals := f.NamedValues[loc]
+ l := len(vals) - 1
+ if l > 0 {
+ vals[d.valIndex] = vals[l]
+ f.NamedValues[loc] = vals[:l]
+ } else {
+ delete(f.NamedValues, loc)
+ l = len(f.Names) - 1
+ f.Names[d.locIndex] = f.Names[l]
+ f.Names = f.Names[:l]
+ }
+ }
+}
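
A minimal standalone sketch of the deletion-order argument above (names are illustrative): with swap-with-end deletion, visiting the largest index first guarantees the pending smaller indices still point at the elements they were recorded for.

    package main

    import (
        "fmt"
        "sort"
    )

    // removeIndices deletes the elements of s at the given indices by
    // swap-with-end, mirroring deleteNamedVals: each deleted slot is filled
    // with the current last element, and processing indices in descending
    // order keeps the not-yet-processed indices valid.
    func removeIndices(s []string, idx []int) []string {
        sort.Sort(sort.Reverse(sort.IntSlice(idx)))
        for _, i := range idx {
            last := len(s) - 1
            s[i] = s[last]
            s = s[:last]
        }
        return s
    }

    func main() {
        s := []string{"a", "b", "c", "d", "e"}
        fmt.Println(removeIndices(s, []int{1, 3})) // [a e c]; order is not preserved
    }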
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index 34cff51c00..3e3573ff39 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -4,14 +4,140 @@
package ssa
-import "cmd/compile/internal/types"
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+type selKey struct {
+ from *Value
+ offset int64
+ size int64
+ typ types.EType
+}
+
+type offsetKey struct {
+ from *Value
+ offset int64
+ pt *types.Type
+}
// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
-// that is more oriented to a platform's ABI. The SelectN operations that extract results are also rewritten into
-// more appropriate forms.
+// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
+// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
+// reached.
func expandCalls(f *Func) {
+ // Calls that need lowering have some number of inputs, including a memory input,
+ // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+
+ // With the current ABI those inputs need to be converted into stores to memory,
+ // rethreading the call's memory input to the first, and the new call now receiving the last.
+
+ // With the current ABI, the outputs need to be converted to loads, which will all use the call's
+ // memory output as their input.
+ if !LateCallExpansionEnabledWithin(f) {
+ return
+ }
+ debug := f.pass.debug > 0
+
canSSAType := f.fe.CanSSA
+ regSize := f.Config.RegSize
sp, _ := f.spSb()
+ typ := &f.Config.Types
+ ptrSize := f.Config.PtrSize
+
+ // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
+ var hiOffset, lowOffset int64
+ if f.Config.BigEndian {
+ lowOffset = 4
+ } else {
+ hiOffset = 4
+ }
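+ // For example, a 64-bit integer stored at offset 0 keeps its high word at
+ // offset hiOffset and its low word at offset lowOffset: little-endian is
+ // lo-at-0/hi-at-4, big-endian the reverse.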
+
+ namedSelects := make(map[*Value][]namedVal)
+
+ // intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
+ // that has no 64-bit integer registers.
+ intPairTypes := func(et types.EType) (tHi, tLo *types.Type) {
+ tHi = typ.UInt32
+ if et == types.TINT64 {
+ tHi = typ.Int32
+ }
+ tLo = typ.UInt32
+ return
+ }
+
+ // isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
+ // that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
+ // so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
+ // integer on 32-bit).
+ isAlreadyExpandedAggregateType := func(t *types.Type) bool {
+ if !canSSAType(t) {
+ return false
+ }
+ return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
+ t.Size() > regSize && t.IsInteger()
+ }
+
+ offsets := make(map[offsetKey]*Value)
+
+ // offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
+ // TODO should also optimize offsets from SB?
+ offsetFrom := func(from *Value, offset int64, pt *types.Type) *Value {
+ if offset == 0 && from.Type == pt { // this is not actually likely
+ return from
+ }
+ // Simplify, canonicalize
+ for from.Op == OpOffPtr {
+ offset += from.AuxInt
+ from = from.Args[0]
+ }
+ if from == sp {
+ return f.ConstOffPtrSP(pt, offset, sp)
+ }
+ key := offsetKey{from, offset, pt}
+ v := offsets[key]
+ if v != nil {
+ return v
+ }
+ v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
+ offsets[key] = v
+ return v
+ }
+
+ splitSlots := func(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot {
+ var locs []LocalSlot
+ for i := range ls {
+ locs = append(locs, f.fe.SplitSlot(&ls[i], sfx, offset, ty))
+ }
+ return locs
+ }
+
+ // removeTrivialWrapperTypes unwraps layers of
+ // struct { singleField SomeType } and [1]SomeType
+ // until a non-wrapper type is reached. This is useful
+ // for working with assignments to/from interface data
+ // fields (either second operand to OpIMake or OpIData)
+ // where the wrapping or type conversion can be elided
+ // because of type conversions/assertions in source code
+ // that do not appear in SSA.
+ removeTrivialWrapperTypes := func(t *types.Type) *types.Type {
+ for {
+ if t.IsStruct() && t.NumFields() == 1 {
+ t = t.Field(0).Type
+ continue
+ }
+ if t.IsArray() && t.NumElem() == 1 {
+ t = t.Elem()
+ continue
+ }
+ break
+ }
+ return t
+ }
+
// Calls that need lowering have some number of inputs, including a memory input,
// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
@@ -21,79 +147,631 @@ func expandCalls(f *Func) {
// With the current ABI, the outputs need to be converted to loads, which will all use the call's
// memory output as their input.
- // Step 1: find all references to calls as values and rewrite those.
+ // rewriteSelect recursively walks from a leaf selector to a root (OpSelectN) through
+ // a chain of Struct/Array Select operations. If the chain of selectors does not
+ // end in an OpSelectN, it does nothing (this can happen depending on compiler phase ordering).
+ // It emits the code necessary to implement the leaf select operation that leads to the call.
+ // TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
+ var rewriteSelect func(leaf *Value, selector *Value, offset int64) []LocalSlot
+ rewriteSelect = func(leaf *Value, selector *Value, offset int64) []LocalSlot {
+ var locs []LocalSlot
+ leafType := leaf.Type
+ switch selector.Op {
+ case OpSelectN:
+ // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
+ for _, s := range namedSelects[selector] {
+ locs = append(locs, f.Names[s.locIndex])
+ }
+ call := selector.Args[0]
+ aux := call.Aux.(*AuxCall)
+ which := selector.AuxInt
+ if which == aux.NResults() { // mem is after the results.
+ // rewrite v as a Copy of call -- the replacement call will produce a mem.
+ leaf.copyOf(call)
+ } else {
+ leafType := removeTrivialWrapperTypes(leaf.Type)
+ if canSSAType(leafType) {
+ for leafType.Etype == types.TSTRUCT && leafType.NumFields() == 1 {
+ // This may not be adequately general -- consider [1]T as well -- but the case seen here is caused by immediate IDATA
+ leafType = leafType.Field(0).Type
+ }
+ pt := types.NewPtr(leafType)
+ off := offsetFrom(sp, offset+aux.OffsetOfResult(which), pt)
+ // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
+ if leaf.Block == call.Block {
+ leaf.reset(OpLoad)
+ leaf.SetArgs2(off, call)
+ leaf.Type = leafType
+ } else {
+ w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
+ leaf.copyOf(w)
+ }
+ } else {
+ f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
+ }
+ }
+ case OpStructSelect:
+ w := selector.Args[0]
+ var ls []LocalSlot
+ if w.Type.Etype != types.TSTRUCT {
+ f.Fatalf("Bad type for w: v=%v; sel=%v; w=%v; ,f=%s\n", leaf.LongString(), selector.LongString(), w.LongString(), f.Name)
+ // Artifact of immediate interface idata
+ ls = rewriteSelect(leaf, w, offset)
+ } else {
+ ls = rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt)))
+ for _, l := range ls {
+ locs = append(locs, f.fe.SplitStruct(l, int(selector.AuxInt)))
+ }
+ }
+
+ case OpArraySelect:
+ w := selector.Args[0]
+ rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt)
+
+ case OpInt64Hi:
+ w := selector.Args[0]
+ ls := rewriteSelect(leaf, w, offset+hiOffset)
+ locs = splitSlots(ls, ".hi", hiOffset, leafType)
+
+ case OpInt64Lo:
+ w := selector.Args[0]
+ ls := rewriteSelect(leaf, w, offset+lowOffset)
+ locs = splitSlots(ls, ".lo", lowOffset, leafType)
+
+ case OpStringPtr:
+ ls := rewriteSelect(leaf, selector.Args[0], offset)
+ locs = splitSlots(ls, ".ptr", 0, typ.BytePtr)
+ case OpSlicePtr:
+ w := selector.Args[0]
+ ls := rewriteSelect(leaf, w, offset)
+ locs = splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
+
+ case OpITab:
+ w := selector.Args[0]
+ ls := rewriteSelect(leaf, w, offset)
+ sfx := ".itab"
+ if w.Type.IsEmptyInterface() {
+ sfx = ".type"
+ }
+ locs = splitSlots(ls, sfx, 0, typ.Uintptr)
+
+ case OpComplexReal:
+ ls := rewriteSelect(leaf, selector.Args[0], offset)
+ locs = splitSlots(ls, ".real", 0, leafType)
+
+ case OpComplexImag:
+ ls := rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part.
+ locs = splitSlots(ls, ".imag", leafType.Width, leafType)
+
+ case OpStringLen, OpSliceLen:
+ ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize)
+ locs = splitSlots(ls, ".len", ptrSize, leafType)
+
+ case OpIData:
+ ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize)
+ locs = splitSlots(ls, ".data", ptrSize, leafType)
+
+ case OpSliceCap:
+ ls := rewriteSelect(leaf, selector.Args[0], offset+2*ptrSize)
+ locs = splitSlots(ls, ".cap", 2*ptrSize, leafType)
+
+ case OpCopy: // If it's an intermediate result, recurse
+ locs = rewriteSelect(leaf, selector.Args[0], offset)
+ for _, s := range namedSelects[selector] {
+ // this copy may have had its own name, preserve that, too.
+ locs = append(locs, f.Names[s.locIndex])
+ }
+
+ default:
+ // Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
+ }
+
+ return locs
+ }
+
+ // storeArg converts stores of SSA-able aggregate arguments (passed to a call) into a series of stores of
+ // smaller types into individual parameter slots.
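+ // For example, a string argument becomes two stores: its *byte pointer at
+ // the parameter's offset and its int length at offset+ptrSize.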
+ var storeArg func(pos src.XPos, b *Block, a *Value, t *types.Type, offset int64, mem *Value) *Value
+ storeArg = func(pos src.XPos, b *Block, a *Value, t *types.Type, offset int64, mem *Value) *Value {
+ if debug {
+ fmt.Printf("\tstoreArg(%s; %s; %v; %d; %s)\n", b, a.LongString(), t, offset, mem.String())
+ }
+
+ switch a.Op {
+ case OpArrayMake0, OpStructMake0:
+ return mem
+
+ case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
+ for i := 0; i < t.NumFields(); i++ {
+ fld := t.Field(i)
+ mem = storeArg(pos, b, a.Args[i], fld.Type, offset+fld.Offset, mem)
+ }
+ return mem
+
+ case OpArrayMake1:
+ return storeArg(pos, b, a.Args[0], t.Elem(), offset, mem)
+
+ case OpInt64Make:
+ tHi, tLo := intPairTypes(t.Etype)
+ mem = storeArg(pos, b, a.Args[0], tHi, offset+hiOffset, mem)
+ return storeArg(pos, b, a.Args[1], tLo, offset+lowOffset, mem)
+
+ case OpComplexMake:
+ tPart := typ.Float32
+ wPart := t.Width / 2
+ if wPart == 8 {
+ tPart = typ.Float64
+ }
+ mem = storeArg(pos, b, a.Args[0], tPart, offset, mem)
+ return storeArg(pos, b, a.Args[1], tPart, offset+wPart, mem)
+
+ case OpIMake:
+ mem = storeArg(pos, b, a.Args[0], typ.Uintptr, offset, mem)
+ return storeArg(pos, b, a.Args[1], typ.BytePtr, offset+ptrSize, mem)
+
+ case OpStringMake:
+ mem = storeArg(pos, b, a.Args[0], typ.BytePtr, offset, mem)
+ return storeArg(pos, b, a.Args[1], typ.Int, offset+ptrSize, mem)
+
+ case OpSliceMake:
+ mem = storeArg(pos, b, a.Args[0], typ.BytePtr, offset, mem)
+ mem = storeArg(pos, b, a.Args[1], typ.Int, offset+ptrSize, mem)
+ return storeArg(pos, b, a.Args[2], typ.Int, offset+2*ptrSize, mem)
+ }
+
+ dst := offsetFrom(sp, offset, types.NewPtr(t))
+ x := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, a, mem)
+ if debug {
+ fmt.Printf("\t\tstoreArg returns %s\n", x.LongString())
+ }
+ return x
+ }
+
+ // splitStore converts a store of an SSA-able aggregate into a series of smaller stores, emitting
+ // appropriate Struct/Array Select operations (which will soon go dead) to obtain the parts.
+ // This has to handle aggregate types that have already been lowered by an earlier phase.
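+ // For example, a stored slice becomes three smaller stores (ptr, then len at
+ // offset+ptrSize, then cap at offset+2*ptrSize), fed by OpSlicePtr/OpSliceLen/
+ // OpSliceCap selections that will soon go dead.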
+ var splitStore func(dest, source, mem, v *Value, t *types.Type, offset int64, firstStorePos src.XPos) *Value
+ splitStore = func(dest, source, mem, v *Value, t *types.Type, offset int64, firstStorePos src.XPos) *Value {
+ if debug {
+ fmt.Printf("\tsplitStore(%s; %s; %s; %s; %v; %d; %v)\n", dest.LongString(), source.LongString(), mem.String(), v.LongString(), t, offset, firstStorePos)
+ }
+ pos := v.Pos.WithNotStmt()
+ switch t.Etype {
+ case types.TARRAY:
+ elt := t.Elem()
+ if t.NumElem() == 1 && t.Width == regSize && elt.Width == regSize {
+ t = removeTrivialWrapperTypes(t)
+ if t.Etype == types.TSTRUCT || t.Etype == types.TARRAY {
+ f.Fatalf("Did not expect to find IDATA-immediate with non-trivial struct/array in it")
+ }
+ break // handle the leaf type.
+ }
+ for i := int64(0); i < t.NumElem(); i++ {
+ sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
+ mem = splitStore(dest, sel, mem, v, elt, offset+i*elt.Width, firstStorePos)
+ firstStorePos = firstStorePos.WithNotStmt()
+ }
+ return mem
+
+ case types.TSTRUCT:
+ if t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == regSize {
+ // This peculiar test deals with accesses to immediate interface data.
+ // It works okay because everything is the same size.
+ // Example code that triggers this can be found in go/constant/value.go, function ToComplex
+ // v119 (+881) = IData <intVal> v6
+ // v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
+ // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
+ // Guard against "struct{struct{*foo}}"
+ // Other rewriting phases create minor glitches when they transform IData, for instance the
+ // interface-typed Arg "x" of ToFloat in go/constant/value.go
+ // v6 (858) = Arg <Value> {x} (x[Value], x[Value])
+ // is rewritten by decomposeArgs into
+ // v141 (858) = Arg <uintptr> {x}
+ // v139 (858) = Arg <*uint8> {x} [8]
+ // because of a type case clause on line 862 of go/constant/value.go
+ // case intVal:
+ // return itof(x)
+ // v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields of
+ // of a *uint8, which does not succeed.
+ t = removeTrivialWrapperTypes(t)
+
+ // it could be a leaf type, but the "leaf" could be complex64 (for example)
+ return splitStore(dest, source, mem, v, t, offset, firstStorePos)
+ }
+
+ for i := 0; i < t.NumFields(); i++ {
+ fld := t.Field(i)
+ sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
+ mem = splitStore(dest, sel, mem, v, fld.Type, offset+fld.Offset, firstStorePos)
+ firstStorePos = firstStorePos.WithNotStmt()
+ }
+ return mem
+
+ case types.TINT64, types.TUINT64:
+ if t.Width == regSize {
+ break
+ }
+ tHi, tLo := intPairTypes(t.Etype)
+ sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
+ mem = splitStore(dest, sel, mem, v, tHi, offset+hiOffset, firstStorePos)
+ firstStorePos = firstStorePos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
+ return splitStore(dest, sel, mem, v, tLo, offset+lowOffset, firstStorePos)
+
+ case types.TINTER:
+ sel := source.Block.NewValue1(pos, OpITab, typ.BytePtr, source)
+ mem = splitStore(dest, sel, mem, v, typ.BytePtr, offset, firstStorePos)
+ firstStorePos = firstStorePos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpIData, typ.BytePtr, source)
+ return splitStore(dest, sel, mem, v, typ.BytePtr, offset+ptrSize, firstStorePos)
+
+ case types.TSTRING:
+ sel := source.Block.NewValue1(pos, OpStringPtr, typ.BytePtr, source)
+ mem = splitStore(dest, sel, mem, v, typ.BytePtr, offset, firstStorePos)
+ firstStorePos = firstStorePos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpStringLen, typ.Int, source)
+ return splitStore(dest, sel, mem, v, typ.Int, offset+ptrSize, firstStorePos)
+
+ case types.TSLICE:
+ et := types.NewPtr(t.Elem())
+ sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
+ mem = splitStore(dest, sel, mem, v, et, offset, firstStorePos)
+ firstStorePos = firstStorePos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpSliceLen, typ.Int, source)
+ mem = splitStore(dest, sel, mem, v, typ.Int, offset+ptrSize, firstStorePos)
+ sel = source.Block.NewValue1(pos, OpSliceCap, typ.Int, source)
+ return splitStore(dest, sel, mem, v, typ.Int, offset+2*ptrSize, firstStorePos)
+
+ case types.TCOMPLEX64:
+ sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float32, source)
+ mem = splitStore(dest, sel, mem, v, typ.Float32, offset, firstStorePos)
+ firstStorePos = firstStorePos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float32, source)
+ return splitStore(dest, sel, mem, v, typ.Float32, offset+4, firstStorePos)
+
+ case types.TCOMPLEX128:
+ sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float64, source)
+ mem = splitStore(dest, sel, mem, v, typ.Float64, offset, firstStorePos)
+ firstStorePos = firstStorePos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float64, source)
+ return splitStore(dest, sel, mem, v, typ.Float64, offset+8, firstStorePos)
+ }
+ // Default, including for aggregates whose single element exactly fills their container
+ // TODO this will be a problem for cast interfaces containing floats when we move to registers.
+ x := v.Block.NewValue3A(firstStorePos, OpStore, types.TypeMem, t, offsetFrom(dest, offset, types.NewPtr(t)), source, mem)
+ if debug {
+ fmt.Printf("\t\tsplitStore returns %s\n", x.LongString())
+ }
+
+ return x
+ }
+
+ // rewriteArgs removes all the Args from a call and converts the call args into appropriate
+ // stores (or later, register movement). Extra args for interface and closure calls are not
+ // converted to stores, but they are removed with the rest; the caller reinstalls them.
+ rewriteArgs := func(v *Value, firstArg int) *Value {
+ // Thread the stores on the memory arg
+ aux := v.Aux.(*AuxCall)
+ pos := v.Pos.WithNotStmt()
+ m0 := v.Args[len(v.Args)-1]
+ mem := m0
+ for i, a := range v.Args {
+ if i < firstArg {
+ continue
+ }
+ if a == m0 { // mem is last.
+ break
+ }
+ auxI := int64(i - firstArg)
+ if a.Op == OpDereference {
+ if a.MemoryArg() != m0 {
+ f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
+ }
+ // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
+ // TODO this will be more complicated with registers in the picture.
+ source := a.Args[0]
+ dst := f.ConstOffPtrSP(source.Type, aux.OffsetOfArg(auxI), sp)
+ if a.Uses == 1 && a.Block == v.Block {
+ a.reset(OpMove)
+ a.Pos = pos
+ a.Type = types.TypeMem
+ a.Aux = aux.TypeOfArg(auxI)
+ a.AuxInt = aux.SizeOfArg(auxI)
+ a.SetArgs3(dst, source, mem)
+ mem = a
+ } else {
+ mem = v.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, source, mem)
+ mem.AuxInt = aux.SizeOfArg(auxI)
+ }
+ } else {
+ if debug {
+ fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
+ }
+ mem = storeArg(pos, v.Block, a, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI), mem)
+ }
+ }
+ v.resetArgs()
+ return mem
+ }
+
+ // TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here.
+
+ // Step 0: rewrite the calls to convert incoming args to stores.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStaticLECall:
+ mem := rewriteArgs(v, 0)
+ v.SetArgs1(mem)
+ case OpClosureLECall:
+ code := v.Args[0]
+ context := v.Args[1]
+ mem := rewriteArgs(v, 2)
+ v.SetArgs3(code, context, mem)
+ case OpInterLECall:
+ code := v.Args[0]
+ mem := rewriteArgs(v, 1)
+ v.SetArgs2(code, mem)
+ }
+ }
+ }
+
+ for i, name := range f.Names {
+ t := name.Type
+ if isAlreadyExpandedAggregateType(t) {
+ for j, v := range f.NamedValues[name] {
+ if v.Op == OpSelectN {
+ ns := namedSelects[v]
+ namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
+ }
+ }
+ }
+ }
+
+ // Step 1: any stores of aggregates remaining are believed to be sourced from call results.
+ // Decompose those stores into a series of smaller stores, adding selection ops as necessary.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpStore {
+ t := v.Aux.(*types.Type)
+ iAEATt := isAlreadyExpandedAggregateType(t)
+ if !iAEATt {
+ // guard against storing an immediate struct into an interface data field; the store type is *uint8
+ // TODO can this happen recursively?
+ tSrc := v.Args[1].Type
+ iAEATt = isAlreadyExpandedAggregateType(tSrc)
+ if iAEATt {
+ t = tSrc
+ }
+ }
+ if iAEATt {
+ if debug {
+ fmt.Printf("Splitting store %s\n", v.LongString())
+ }
+ dst, source, mem := v.Args[0], v.Args[1], v.Args[2]
+ mem = splitStore(dst, source, mem, v, t, 0, v.Pos)
+ v.copyOf(mem)
+ }
+ }
+ }
+ }
+
+ val2Preds := make(map[*Value]int32) // Used to accumulate dependency graph of selection operations for topological ordering.
+
+ // Step 2: transform or accumulate selection operations for rewrite in topological order.
+ //
+ // Aggregate types that have already (in earlier phases) been transformed must be lowered comprehensively to finish
+ // the transformation (user-defined structs and arrays, slices, strings, interfaces, complex, 64-bit on 32-bit architectures),
+ //
+ // Any select-for-addressing applied to call results can be transformed directly.
for _, b := range f.Blocks {
for _, v := range b.Values {
+ // Accumulate chains of selectors for processing in topological order
switch v.Op {
+ case OpStructSelect, OpArraySelect,
+ OpIData, OpITab,
+ OpStringPtr, OpStringLen,
+ OpSlicePtr, OpSliceLen, OpSliceCap,
+ OpComplexReal, OpComplexImag,
+ OpInt64Hi, OpInt64Lo:
+ w := v.Args[0]
+ switch w.Op {
+ case OpStructSelect, OpArraySelect, OpSelectN:
+ val2Preds[w] += 1
+ if debug {
+ fmt.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
+ }
+ }
+ fallthrough
+
case OpSelectN:
- call := v.Args[0]
- aux := call.Aux.(*AuxCall)
- which := v.AuxInt
- t := v.Type
- if which == aux.NResults() { // mem is after the results.
- // rewrite v as a Copy of call -- the replacement call will produce a mem.
- v.copyOf(call)
- } else {
- pt := types.NewPtr(t)
- if canSSAType(t) {
- off := f.ConstOffPtrSP(pt, aux.OffsetOfResult(which), sp)
- v.reset(OpLoad)
- v.SetArgs2(off, call)
- } else {
- panic("Should not have non-SSA-able OpSelectN")
+ if _, ok := val2Preds[v]; !ok {
+ val2Preds[v] = 0
+ if debug {
+ fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
}
}
- v.Type = t // not right for the mem operand yet, but will be when call is rewritten.
case OpSelectNAddr:
+ // Do these directly, there are no chains of selectors.
call := v.Args[0]
which := v.AuxInt
aux := call.Aux.(*AuxCall)
pt := v.Type
- off := f.ConstOffPtrSP(pt, aux.OffsetOfResult(which), sp)
+ off := offsetFrom(sp, aux.OffsetOfResult(which), pt)
v.copyOf(off)
}
}
}
- // Step 2: rewrite the calls
+ // Step 3: Compute topological order of selectors,
+ // then process it in reverse to eliminate duplicates,
+ // then forwards to rewrite selectors.
+ //
+ // All chains of selectors end up in same block as the call.
+ sdom := f.Sdom()
+
+ // Compilation must be deterministic, so sort each batch after extracting the zero-predecessor values from the map.
+ // Sorting allows dominators-last order within each batch,
+ // so that the backwards scan for duplicates will most often find copies from dominating blocks (it is best-effort).
+ var toProcess []*Value
+ less := func(i, j int) bool {
+ vi, vj := toProcess[i], toProcess[j]
+ bi, bj := vi.Block, vj.Block
+ if bi == bj {
+ return vi.ID < vj.ID
+ }
+ return sdom.domorder(bi) > sdom.domorder(bj) // reverse the order to put dominators last.
+ }
+
+ // Accumulate order in allOrdered
+ var allOrdered []*Value
+ for v, n := range val2Preds {
+ if n == 0 {
+ allOrdered = append(allOrdered, v)
+ }
+ }
+ last := 0 // allOrdered[0:last] has been top-sorted and processed
+ for len(val2Preds) > 0 {
+ toProcess = allOrdered[last:]
+ last = len(allOrdered)
+ sort.SliceStable(toProcess, less)
+ for _, v := range toProcess {
+ w := v.Args[0]
+ delete(val2Preds, v)
+ n, ok := val2Preds[w]
+ if !ok {
+ continue
+ }
+ if n == 1 {
+ allOrdered = append(allOrdered, w)
+ delete(val2Preds, w)
+ continue
+ }
+ val2Preds[w] = n - 1
+ }
+ }
+
+ common := make(map[selKey]*Value)
+ // Rewrite duplicate selectors as copies where possible.
+ for i := len(allOrdered) - 1; i >= 0; i-- {
+ v := allOrdered[i]
+ w := v.Args[0]
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ }
+ typ := v.Type
+ if typ.IsMemory() {
+ continue // handled elsewhere, not an indexable result
+ }
+ size := typ.Width
+ offset := int64(0)
+ switch v.Op {
+ case OpStructSelect:
+ if w.Type.Etype == types.TSTRUCT {
+ offset = w.Type.FieldOff(int(v.AuxInt))
+ } else { // Immediate interface data artifact, offset is zero.
+ f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
+ }
+ case OpArraySelect:
+ offset = size * v.AuxInt
+ case OpSelectN:
+ offset = w.Aux.(*AuxCall).OffsetOfResult(v.AuxInt)
+ case OpInt64Hi:
+ offset = hiOffset
+ case OpInt64Lo:
+ offset = lowOffset
+ case OpStringLen, OpSliceLen, OpIData:
+ offset = ptrSize
+ case OpSliceCap:
+ offset = 2 * ptrSize
+ case OpComplexImag:
+ offset = size
+ }
+ sk := selKey{from: w, size: size, offset: offset, typ: typ.Etype}
+ dupe := common[sk]
+ if dupe == nil {
+ common[sk] = v
+ } else if sdom.IsAncestorEq(dupe.Block, v.Block) {
+ v.copyOf(dupe)
+ } else {
+ // Because values are processed in dominator order, the old common[sk] will never dominate after a miss is seen.
+ // Installing the new value might match some future values.
+ common[sk] = v
+ }
+ }
+
+ // Indices of entries in f.Names that need to be deleted.
+ var toDelete []namedVal
+
+ // Rewrite selectors.
+ for i, v := range allOrdered {
+ if debug {
+ b := v.Block
+ fmt.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
+ }
+ if v.Uses == 0 {
+ v.reset(OpInvalid)
+ continue
+ }
+ if v.Op == OpCopy {
+ continue
+ }
+ locs := rewriteSelect(v, v, 0)
+ // Install new names.
+ if v.Type.IsMemory() {
+ continue
+ }
+ // Leaf types may have debug locations
+ if !isAlreadyExpandedAggregateType(v.Type) {
+ for _, l := range locs {
+ f.NamedValues[l] = append(f.NamedValues[l], v)
+ }
+ f.Names = append(f.Names, locs...)
+ continue
+ }
+ // Not-leaf types that had debug locations need to lose them.
+ if ns, ok := namedSelects[v]; ok {
+ toDelete = append(toDelete, ns...)
+ }
+ }
+
+ deleteNamedVals(f, toDelete)
+
+ // Step 4: rewrite the calls themselves, correcting the type
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
case OpStaticLECall:
- // Thread the stores on the memory arg
- m0 := v.Args[len(v.Args)-1]
- mem := m0
- pos := v.Pos.WithNotStmt()
- aux := v.Aux.(*AuxCall)
- auxInt := v.AuxInt
- for i, a := range v.Args {
- if a == m0 {
- break
- }
- if a.Op == OpDereference {
- // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
- src := a.Args[0]
- dst := f.ConstOffPtrSP(src.Type, aux.OffsetOfArg(int64(i)), sp)
- a.reset(OpMove)
- a.Pos = pos
- a.Type = types.TypeMem
- a.Aux = aux.TypeOfArg(int64(i))
- a.AuxInt = aux.SizeOfArg(int64(i))
- a.SetArgs3(dst, src, mem)
- mem = a
- } else {
- // Add a new store.
- t := aux.TypeOfArg(int64(i))
- dst := f.ConstOffPtrSP(types.NewPtr(t), aux.OffsetOfArg(int64(i)), sp)
- mem = b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, a, mem)
- }
- }
- v.reset(OpStaticCall)
+ v.Op = OpStaticCall
v.Type = types.TypeMem
- v.Aux = aux
- v.AuxInt = auxInt
- v.SetArgs1(mem)
+ case OpClosureLECall:
+ v.Op = OpClosureCall
+ v.Type = types.TypeMem
+ case OpInterLECall:
+ v.Op = OpInterCall
+ v.Type = types.TypeMem
+ }
+ }
+ }
+
+ // Step 5: elide any copies introduced.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if a.Op != OpCopy {
+ continue
+ }
+ aa := copySource(a)
+ v.SetArg(i, aa)
+ for a.Uses == 0 {
+ b := a.Args[0]
+ a.reset(OpInvalid)
+ a = b
+ }
}
}
}
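
The batch ordering in Step 3 is, in essence, Kahn's algorithm over the selector chains. Below is a small self-contained sketch under simplifying assumptions: each node feeds at most one downstream node (selectors have a single Args[0]), and the per-batch dominator-order sort is replaced by a plain string sort for determinism.

    package main

    import (
        "fmt"
        "sort"
    )

    // topoOrder mirrors the val2Preds/allOrdered scheme: seed the order with
    // nodes whose recorded predecessor count is zero, then, as each node is
    // processed, decrement its downstream node's count and release it once
    // the count reaches zero. succ[n] names the node n selects from ("" for none).
    func topoOrder(preds map[string]int, succ map[string]string) []string {
        var order []string
        for n, c := range preds {
            if c == 0 {
                order = append(order, n)
            }
        }
        sort.Strings(order) // stand-in for the dominator-order batch sort
        for last := 0; last < len(order); last++ {
            n := order[last]
            delete(preds, n)
            w := succ[n]
            c, ok := preds[w]
            if !ok {
                continue
            }
            if c == 1 {
                order = append(order, w)
                delete(preds, w)
            } else {
                preds[w] = c - 1
            }
        }
        return order
    }

    func main() {
        // Two leaf selectors both read the same SelectN; it is released
        // only after both of its users have been processed.
        preds := map[string]int{"sel.a": 0, "sel.b": 0, "selectN": 2}
        succ := map[string]string{"sel.a": "selectN", "sel.b": "selectN"}
        fmt.Println(topoOrder(preds, succ)) // [sel.a sel.b selectN]
    }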
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 51665c60e2..b4c3e5cfdf 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -125,6 +125,10 @@ func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off}
}
+
+func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
+ return LocalSlot{N: parent.N, Type: t, Off: offset}
+}
func (DummyFrontend) Line(_ src.XPos) string {
return "unknown.go:0"
}
diff --git a/src/cmd/compile/internal/ssa/flagalloc.go b/src/cmd/compile/internal/ssa/flagalloc.go
index d50b615912..61c45a6be7 100644
--- a/src/cmd/compile/internal/ssa/flagalloc.go
+++ b/src/cmd/compile/internal/ssa/flagalloc.go
@@ -191,11 +191,6 @@ func flagalloc(f *Func) {
b.FlagsLiveAtEnd = end[b.ID] != nil
}
- const go115flagallocdeadcode = true
- if !go115flagallocdeadcode {
- return
- }
-
// Remove any now-dead values.
// The number of values to remove is likely small,
// and removing them requires processing all values in a block,
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 0df7b4a5d7..ec2c67c1fa 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -672,7 +672,7 @@ func (f *Func) Idom() []*Block {
return f.cachedIdom
}
-// sdom returns a sparse tree representing the dominator relationships
+// Sdom returns a sparse tree representing the dominator relationships
// among the blocks of f.
func (f *Func) Sdom() SparseTree {
if f.cachedSdom == nil {
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index 4a8244eb27..4e6cc8c692 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -38,10 +38,8 @@
(Xor(32|16|8) ...) => (XORL ...)
(Neg(32|16|8) ...) => (NEGL ...)
-(Neg32F x) && !config.use387 => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
-(Neg64F x) && !config.use387 => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
-(Neg32F x) && config.use387 => (FCHS x)
-(Neg64F x) && config.use387 => (FCHS x)
+(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
(Com(32|16|8) ...) => (NOTL ...)
@@ -312,7 +310,7 @@
(Const32 ...) => (MOVLconst ...)
(Const(32|64)F ...) => (MOVS(S|D)const ...)
(ConstNil) => (MOVLconst [0])
-(ConstBool [c]) => (MOVLconst [int32(b2i(c))])
+(ConstBool [c]) => (MOVLconst [b2i32(c)])
// Lowering calls
(StaticCall ...) => (CALLstatic ...)
@@ -670,8 +668,8 @@
// Merge load/store to op
((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
-((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
-((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
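
The ConstBool change swaps int32(b2i(c)) for b2i32(c): the typed-aux rules pick the helper whose result type already matches MOVLconst's 32-bit AuxInt. A sketch of the pair as the rule text implies them (the real definitions live in ssa/rewrite.go):

    package main

    import "fmt"

    // b2i yields the int64 form used by untyped rules; b2i32 yields the
    // int32 form that 32-bit-payload ops such as MOVLconst take directly.
    func b2i(b bool) int64 {
        if b {
            return 1
        }
        return 0
    }

    func b2i32(b bool) int32 {
        if b {
            return 1
        }
        return 0
    }

    func main() {
        fmt.Println(b2i(true), b2i32(true)) // 1 1
    }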
diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go
index ddabde7d3d..737b99c371 100644
--- a/src/cmd/compile/internal/ssa/gen/386Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/386Ops.go
@@ -51,17 +51,6 @@ var regNames386 = []string{
"SB",
}
-// Notes on 387 support.
-// - The 387 has a weird stack-register setup for floating-point registers.
-// We use these registers when SSE registers are not available (when GO386=387).
-// - We use the same register names (X0-X7) but they refer to the 387
-// floating-point registers. That way, most of the SSA backend is unchanged.
-// - The instruction generation pass maintains an SSE->387 register mapping.
-// This mapping is updated whenever the FP stack is pushed or popped so that
-// we can always find a given SSE register even when the TOS pointer has changed.
-// - To facilitate the mapping from SSE to 387, we enforce that
-// every basic block starts and ends with an empty floating-point stack.
-
func init() {
// Make map from reg names to reg integers.
if len(regNames386) > 64 {
@@ -552,9 +541,6 @@ func init() {
{name: "FlagGT_UGT"}, // signed > and unsigned <
{name: "FlagGT_ULT"}, // signed > and unsigned >
- // Special op for -x on 387
- {name: "FCHS", argLength: 1, reg: fp11},
-
// Special ops for PIC floating-point constants.
// MOVSXconst1 loads the address of the constant-pool entry into a register.
// MOVSXconst2 loads the constant from that address.
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 408678f054..934e7dfdb6 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -401,7 +401,7 @@
(Const32F ...) => (MOVSSconst ...)
(Const64F ...) => (MOVSDconst ...)
(ConstNil ) => (MOVQconst [0])
-(ConstBool [c]) => (MOVLconst [int32(b2i(c))])
+(ConstBool [c]) => (MOVLconst [b2i32(c)])
// Lowering calls
(StaticCall ...) => (CALLstatic ...)
@@ -530,8 +530,10 @@
(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem)
// Atomic memory updates.
-(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem)
-(AtomicOr8 ptr val mem) => (ORBlock ptr val mem)
+(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem)
+(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
+(AtomicOr8 ptr val mem) => (ORBlock ptr val mem)
+(AtomicOr32 ptr val mem) => (ORLlock ptr val mem)
// Write barrier.
(WB ...) => (LoweredWB ...)
@@ -957,7 +959,7 @@
(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x))
-(MUL(Q|L)const [c] x) && isPowerOfTwo(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log2(int64(c)+1))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log2(int64(c)+1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x)
@@ -1274,8 +1276,8 @@
(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
-(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= m && int16(m) < n => (FlagLT_ULT)
-(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= m && int8(m) < n => (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT)
// TESTQ c c sets flags like CMPQ c 0.
(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
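
The CMPWconst/CMPBconst guard fix above is easy to see with a concrete constant: m >= 0 as an int32 does not imply int16(m) >= 0, and CMPW compares at 16-bit width. A runnable demonstration:

    package main

    import "fmt"

    func main() {
        // Old guard: 0 <= m && int16(m) < n. New guard: 0 <= int16(m) && int16(m) < n.
        // With m = 0x8000 the mask keeps only bit 15, so the AND result can be
        // 0x8000 = 32768, which is not unsigned-less-than n = 1; concluding
        // FlagLT_ULT from the old guard would have been a miscompile.
        m, n := int32(0x8000), int16(1)
        fmt.Println(0 <= m && int16(m) < n)        // true  (old guard fires)
        fmt.Println(0 <= int16(m) && int16(m) < n) // false (fixed guard does not)
    }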
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index 2df5016d59..de5372670b 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -902,7 +902,9 @@ func init() {
// Atomic memory updates.
{name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
+ {name: "ANDLlock", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
{name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
+ {name: "ORLlock", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
}
var AMD64blocks = []blockData{
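
For context on the new ANDLlock/ORLlock ops: a single locked read-modify-write instruction replaces what otherwise needs a compare-and-swap loop. A portable sketch of that fallback loop, using only the standard sync/atomic API:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // andUint32 atomically clears the bits of *addr that are zero in mask,
    // via the CAS loop that a direct `LOCK ANDL` lowering avoids.
    func andUint32(addr *uint32, mask uint32) {
        for {
            old := atomic.LoadUint32(addr)
            if atomic.CompareAndSwapUint32(addr, old, old&mask) {
                return
            }
        }
    }

    func main() {
        x := uint32(0b1111)
        andUint32(&x, 0b0101)
        fmt.Printf("%04b\n", x) // 0101
    }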
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 9490805f46..f48abcd202 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -169,10 +169,10 @@
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
// constants
-(Const(8|16|32) ...) -> (MOVWconst ...)
-(Const(32F|64F) ...) -> (MOV(F|D)const ...)
+(Const(8|16|32) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVWconst [0])
-(ConstBool ...) -> (MOVWconst ...)
+(ConstBool [b]) => (MOVWconst [b2i32(b)])
// truncations
// Because we ignore high parts of registers, truncates are just copies.
@@ -243,10 +243,10 @@
(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) => (LessEqualU (CMP x y))
-(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
-(OffPtr [off] ptr) -> (ADDconst [off] ptr)
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
-(Addr ...) -> (MOVWaddr ...)
+(Addr {sym} base) => (MOVWaddr {sym} base)
(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
// loads
@@ -1052,8 +1052,8 @@
(BICshiftRL x (MOVWconst [c]) [d]) => (BICconst x [int32(uint32(c)>>uint64(d))])
(BICshiftRA x (MOVWconst [c]) [d]) => (BICconst x [c>>uint64(d)])
(MVNshiftLL (MOVWconst [c]) [d]) => (MOVWconst [^(c<<uint64(d))])
-(MVNshiftRL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)>>uint64(d))])
-(MVNshiftRA (MOVWconst [c]) [d]) -> (MOVWconst [^int64(int32(c)>>uint64(d))])
+(MVNshiftRL (MOVWconst [c]) [d]) => (MOVWconst [^int32(uint32(c)>>uint64(d))])
+(MVNshiftRA (MOVWconst [c]) [d]) => (MOVWconst [int32(c)>>uint64(d)])
(CMPshiftLL x (MOVWconst [c]) [d]) => (CMPconst x [c<<uint64(d)])
(CMPshiftRL x (MOVWconst [c]) [d]) => (CMPconst x [int32(uint32(c)>>uint64(d))])
(CMPshiftRA x (MOVWconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
@@ -1190,12 +1190,12 @@
(MOVWstoreidx ptr (SRAconst idx [c]) val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
(MOVWstoreidx (SRAconst idx [c]) ptr val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
-(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
-(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
+(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) => (MOVWload [c>>uint64(d)] ptr mem)
-(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
-(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
+(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [c>>uint64(d)] ptr val mem)
// generic simplifications
@@ -1263,8 +1263,8 @@
(SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
// comparison simplification
-(CMP x (RSBconst [0] y)) => (CMN x y)
-(CMN x (RSBconst [0] y)) => (CMP x y)
+((LT|LE|EQ|NE|GE|GT) (CMP x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMN x y)) // sense of carry bit not preserved
+((LT|LE|EQ|NE|GE|GT) (CMN x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMP x y)) // sense of carry bit not preserved
(EQ (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (EQ (CMP x y) yes no)
(EQ (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (EQ (CMP a (MUL <x.Type> x y)) yes no)
(EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (EQ (CMPconst [c] x) yes no)
@@ -1470,6 +1470,6 @@
(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GE (TEQshiftRLreg x y z) yes no)
(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GE (TEQshiftRAreg x y z) yes no)
-(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read8(sym, off))])
-(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVWconst [int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))])
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index c4a3532632..c50a8c7778 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -548,8 +548,10 @@
(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
// Currently the updated value is not used, but we need a register to temporarily hold it.
-(AtomicAnd8 ptr val mem) => (Select1 (LoweredAtomicAnd8 ptr val mem))
-(AtomicOr8 ptr val mem) => (Select1 (LoweredAtomicOr8 ptr val mem))
+(AtomicAnd8 ptr val mem) => (Select1 (LoweredAtomicAnd8 ptr val mem))
+(AtomicAnd32 ptr val mem) => (Select1 (LoweredAtomicAnd32 ptr val mem))
+(AtomicOr8 ptr val mem) => (Select1 (LoweredAtomicOr8 ptr val mem))
+(AtomicOr32 ptr val mem) => (Select1 (LoweredAtomicOr32 ptr val mem))
(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
@@ -1171,145 +1173,145 @@
(MUL x (MOVDconst [-1])) => (NEG x)
(MUL _ (MOVDconst [0])) => (MOVDconst [0])
(MUL x (MOVDconst [1])) => x
-(MUL x (MOVDconst [c])) && isPowerOfTwo(c) => (SLLconst [log2(c)] x)
-(MUL x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (ADDshiftLL x x [log2(c-1)])
-(MUL x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
-(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
-(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
-(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
-(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log2(c)] x)
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (ADDshiftLL x x [log2(c-1)])
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x)
(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MULW x (MOVDconst [c])) && int32(c)==1 => x
-(MULW x (MOVDconst [c])) && isPowerOfTwo(c) => (SLLconst [log2(c)] x)
-(MULW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log2(c-1)])
-(MULW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
-(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
-(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
-(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
-(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log2(c)] x)
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log2(c-1)])
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
// mneg by constant
(MNEG x (MOVDconst [-1])) => x
(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
(MNEG x (MOVDconst [1])) => (NEG x)
-(MNEG x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log2(c)] x))
-(MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
-(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
-(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
-(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
-(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log2(c)] x))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
+(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x
(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x)
-(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log2(c)] x))
-(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
-(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
-(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
-(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
-(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log2(c)] x))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
+(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
(MADD a x (MOVDconst [-1])) => (SUB a x)
(MADD a _ (MOVDconst [0])) => a
(MADD a x (MOVDconst [1])) => (ADD a x)
-(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
-(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
-(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
-(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
-(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
-(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log2(c)])
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADD a (MOVDconst [-1]) x) => (SUB a x)
(MADD a (MOVDconst [0]) _) => a
(MADD a (MOVDconst [1]) x) => (ADD a x)
-(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
-(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
-(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
-(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
-(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
-(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log2(c)])
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x)
(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a
(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x)
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
-(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
-(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
-(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
-(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log2(c)])
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x)
(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a
(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x)
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)])
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
-(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
-(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
-(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
-(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log2(c)])
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUB a x (MOVDconst [-1])) => (ADD a x)
(MSUB a _ (MOVDconst [0])) => a
(MSUB a x (MOVDconst [1])) => (SUB a x)
-(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
-(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
-(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
-(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
-(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
-(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log2(c)])
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUB a (MOVDconst [-1]) x) => (ADD a x)
(MSUB a (MOVDconst [0]) _) => a
(MSUB a (MOVDconst [1]) x) => (SUB a x)
-(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
-(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
-(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
-(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
-(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
-(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log2(c)])
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x)
(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a
(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x)
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
-(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
-(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
-(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
-(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log2(c)])
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x)
(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a
(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x)
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)])
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
-(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
-(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
-(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
-(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
-(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log2(c)])
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
// div by constant
(UDIV x (MOVDconst [1])) => x
-(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) => (SRLconst [log2(c)] x)
+(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log2(c)] x)
(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x
-(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (SRLconst [log2(c)] x)
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log2(c)] x)
(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
-(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x)
+(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
-(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (ANDconst [c-1] x)
+(UMODW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (ANDconst [c-1] x)
// generic simplifications
(ADD x (NEG y)) => (SUB x y)
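All of the isPowerOfTwo64 multiply rules above rest on the same arithmetic identities: a multiply by a constant of the right shape reduces to shifts plus at most one add or subtract. A minimal sketch of those identities in plain Go (illustration only, not compiler code; constants chosen to hit each case):

```go
package main

import "fmt"

func main() {
	x := int64(12345)
	fmt.Println(x*8 == x<<3)         // c == 2^k:   a single shift
	fmt.Println(x*9 == x<<3+x)       // c-1 == 2^k: shift, then add x
	fmt.Println(x*7 == x<<3-x)       // c+1 == 2^k: shift, then subtract x
	fmt.Println(x*12 == (x+x<<1)<<2) // c == 3*2^k: 12 = 3*4
	fmt.Println(x*20 == (x+x<<2)<<2) // c == 5*2^k: 20 = 5*4
}
```

The MNEG/MADD/MSUB variants wrap the same decompositions in a final negate, add, or subtract, which is why the guards repeat across all three families.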
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
index 9ff53f7e4e..fe9edbf933 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -656,12 +656,14 @@ func init() {
// atomic and/or.
// *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
- // LDAXRB (Rarg0), Rout
+ // LDAXR (Rarg0), Rout
// AND/OR Rarg1, Rout
- // STLXRB Rout, (Rarg0), Rtmp
+ // STLXR Rout, (Rarg0), Rtmp
// CBNZ Rtmp, -3(PC)
{name: "LoweredAtomicAnd8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
{name: "LoweredAtomicOr8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
// LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
// It saves all GP registers if necessary,
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index 96feaf9234..b6e5312224 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -143,7 +143,7 @@
(Const(32|16|8) [val]) => (MOVWconst [int32(val)])
(Const(32|64)F ...) => (MOV(F|D)const ...)
(ConstNil) => (MOVWconst [0])
-(ConstBool [b]) => (MOVWconst [int32(b2i(b))])
+(ConstBool [b]) => (MOVWconst [b2i32(b)])
// truncations
// Because we ignore high parts of registers, truncates are just copies.
@@ -383,6 +383,9 @@
(ANDconst <typ.UInt32> [3]
(XORconst <typ.UInt32> [3] ptr)))))) mem)
+(AtomicAnd32 ...) => (LoweredAtomicAnd ...)
+(AtomicOr32 ...) => (LoweredAtomicOr ...)
+
// checks
(NilCheck ...) => (LoweredNilCheck ...)
@@ -581,13 +584,13 @@
(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0])
(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG <x.Type> x)
(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
-(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
-(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
+(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
(MUL (MOVWconst [0]) _ ) => (MOVWconst [0])
(MUL (MOVWconst [1]) x ) => x
(MUL (MOVWconst [-1]) x ) => (NEG x)
-(MUL (MOVWconst [c]) x ) && isPowerOfTwo(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+(MUL (MOVWconst [c]) x ) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
// generic simplifications
(ADD x (NEG y)) => (SUB x y)
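Select1 and Select0 of MULTU are the low and high words of the 32x32->64 unsigned product, so when the constant is a power of two each half is a single shift of x, which is exactly what the two rules emit. A quick check of the identity (illustration only):

```go
package main

import "fmt"

func main() {
	x, k := uint32(0xDEADBEEF), uint(4) // c = 1<<k = 16
	p := uint64(x) << k                 // full 64-bit product x*c
	fmt.Println(uint32(p) == x<<k)          // low word:  SLLconst [k]
	fmt.Println(uint32(p>>32) == x>>(32-k)) // high word: SRLconst [32-k]
}
```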
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index e008ec8703..8e4c3a07c8 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -580,13 +580,13 @@
(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x)
(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0])
(Select1 (MULVU x (MOVVconst [1]))) => x
-(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SLLVconst [log2(c)] x)
+(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log2(c)] x)
// div by constant
(Select1 (DIVVU x (MOVVconst [1]))) => x
-(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SRLVconst [log2(c)] x)
+(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log2(c)] x)
(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod
-(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (ANDconst [c-1] x) // mod
+(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod
// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)
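The DIVVU rules are the standard unsigned power-of-two identities: the quotient is a right shift and the remainder keeps the low bits. For instance:

```go
package main

import "fmt"

func main() {
	x, c := uint64(12345), uint64(64) // c = 1<<6
	fmt.Println(x/c == x>>6)    // quotient:  SRLVconst [log2(c)]
	fmt.Println(x%c == x&(c-1)) // remainder: ANDconst [c-1]
}
```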
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index de30d003e6..558b09c9f2 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -150,6 +150,31 @@
(ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31])
(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])
+// Combine rotate and mask operations
+(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+
+// Note, any rotated word bitmask is still a valid word bitmask.
+(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+
+(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+
+// Merge shift right + shift left and clear left (e.g. for a table lookup)
+(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
+// The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF]
+(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
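RLWINM is a rotate-left of the low word followed by an AND with a contiguous (possibly wrapping) mask, which is why a rotate feeding a mask collapses into one instruction. The rules that push a ROTLWconst through an existing mask rely on rotation distributing over AND. A sketch of both facts (the aux packing done by encodePPC64RotateMask is not modeled here):

```go
package main

import (
	"fmt"
	"math/bits"
)

// rlwinm models the instruction's effect: rotate the 32-bit value left
// by r, then keep only the bits selected by the mask m.
func rlwinm(x uint32, r int, m uint32) uint32 {
	return bits.RotateLeft32(x, r) & m
}

func main() {
	x, m, r := uint32(0x12345678), uint32(0x0000FFF0), 8
	// Rotation is a bit permutation, so rotl(x&m) == rotl(x) & rotl(m):
	// rotating a masked value is one rotate-and-mask with the rotated mask.
	fmt.Println(bits.RotateLeft32(x&m, r) == rlwinm(x, r, bits.RotateLeft32(m, r)))
}
```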
// large constant shifts
(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
@@ -821,6 +846,8 @@
(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
+(MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x)
+
// Subtract from (with carry, but ignored) constant.
// Note, these clobber the carry bit.
(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
@@ -965,10 +992,10 @@
// atomic intrinsics
(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
-(AtomicLoadAcq32 ptr mem) => (LoweredAtomicLoad32 [0] ptr mem)
+(AtomicLoadAcq(32|64) ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
(AtomicStore(8|32|64) ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
-(AtomicStoreRel32 ptr val mem) => (LoweredAtomicStore32 [0] ptr val mem)
+(AtomicStoreRel(32|64) ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) => (STLR ptr val mem)
(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
@@ -978,8 +1005,10 @@
(AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
(AtomicCompareAndSwapRel32 ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
-(AtomicAnd8 ...) => (LoweredAtomicAnd8 ...)
-(AtomicOr8 ...) => (LoweredAtomicOr8 ...)
+(AtomicAnd8 ...) => (LoweredAtomicAnd8 ...)
+(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
+(AtomicOr8 ...) => (LoweredAtomicOr8 ...)
+(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
@@ -1018,13 +1047,12 @@
(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
-(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
-(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
-(SLWconst [c] z:(MOVWZreg x)) && z.Uses == 1 && c < 24 => (CLRLSLWI [newPPC64ShiftAuxInt(c,8,31,32)] x)
-(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
-(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
// special case for power9
(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && objabi.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
index 28317928a8..f7198b90c3 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -137,6 +137,7 @@ func init() {
gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp21a0 = regInfo{inputs: []regMask{gp, gp | sp | sb}, outputs: []regMask{gp}}
gp31 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
gp22 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
gp32 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
@@ -181,6 +182,8 @@ func init() {
{name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit)
{name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit)
+ {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit)
+ {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit)
{name: "MADDLD", argLength: 3, reg: gp31, asm: "MADDLD", typ: "Int64"}, // (arg0*arg1)+arg2 (signed 64-bit)
{name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed
@@ -225,6 +228,10 @@ func init() {
{name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits
{name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"},
+ {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
+ {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
+ {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
+
{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros
{name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit)
@@ -600,25 +607,22 @@ func init() {
{name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
// atomic add32, 64
- // SYNC
+ // LWSYNC
// LDAR (Rarg0), Rout
// ADD Rarg1, Rout
// STDCCC Rout, (Rarg0)
// BNE -3(PC)
- // ISYNC
// return new sum
-
{name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
// atomic exchange32, 64
- // SYNC
+ // LWSYNC
// LDAR (Rarg0), Rout
// STDCCC Rarg1, (Rarg0)
// BNE -2(PC)
// ISYNC
// return old val
-
{name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
@@ -641,15 +645,16 @@ func init() {
{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
- // atomic 8 and/or.
+ // atomic 8/32 and/or.
// *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.
- // LBAR (Rarg0), Rtmp
+ // LBAR/LWAR (Rarg0), Rtmp
// AND/OR Rarg1, Rtmp
- // STBCCC Rtmp, (Rarg0), Rtmp
+ // STBCCC/STWCCC Rtmp, (Rarg0), Rtmp
// BNE Rtmp, -3(PC)
-
{name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
// LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
// It preserves R0 through R17 (except special registers R1, R2, R11, R12, R13), g, and its arguments R20 and R21,
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
index 9437c8e9d4..325cbeb825 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -3,11 +3,7 @@
// license that can be found in the LICENSE file.
// Optimizations TODO:
-// * Somehow track when values are already zero/signed-extended, avoid re-extending.
// * Use SLTI and SLTIU for comparisons to constants, instead of SLT/SLTU with constants in registers
-// * Find a more efficient way to do zero/sign extension than left+right shift.
-// There are many other options (store then load-extend, LUI+ANDI for zero extend, special case 32->64, ...),
-// but left+right shift is simple and uniform, and we don't have real hardware to do perf testing on anyway.
// * Use the zero register instead of moving 0 into a register.
// * Add rules to avoid generating a temp bool value for (If (SLT[U] ...) ...).
// * Optimize left and right shift by simplifying SLTIU, Neg, and ADD for constants.
@@ -66,8 +62,8 @@
(Mod32u ...) => (REMUW ...)
(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
-(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
-(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
(And64 ...) => (AND ...)
(And32 ...) => (AND ...)
@@ -98,25 +94,21 @@
(Sqrt ...) => (FSQRTD ...)
-// Zero and sign extension
-// Shift left until the bits we want are at the top of the register.
-// Then logical/arithmetic shift right for zero/sign extend.
-// We always extend to 64 bits; there's no reason not to,
-// and optimization rules can then collapse some extensions.
-
-(SignExt8to16 <t> x) => (SRAI [56] (SLLI <t> [56] x))
-(SignExt8to32 <t> x) => (SRAI [56] (SLLI <t> [56] x))
-(SignExt8to64 <t> x) => (SRAI [56] (SLLI <t> [56] x))
-(SignExt16to32 <t> x) => (SRAI [48] (SLLI <t> [48] x))
-(SignExt16to64 <t> x) => (SRAI [48] (SLLI <t> [48] x))
-(SignExt32to64 <t> x) => (ADDIW [0] x)
-
-(ZeroExt8to16 <t> x) => (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt8to32 <t> x) => (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt8to64 <t> x) => (SRLI [56] (SLLI <t> [56] x))
-(ZeroExt16to32 <t> x) => (SRLI [48] (SLLI <t> [48] x))
-(ZeroExt16to64 <t> x) => (SRLI [48] (SLLI <t> [48] x))
-(ZeroExt32to64 <t> x) => (SRLI [32] (SLLI <t> [32] x))
+// Sign and zero extension.
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
(Cvt32to32F ...) => (FCVTSW ...)
(Cvt32to64F ...) => (FCVTDW ...)
@@ -261,16 +253,16 @@
(EqPtr x y) => (SEQZ (SUB <x.Type> x y))
(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
(Eq32 x y) => (SEQZ (SUBW <x.Type> x y))
-(Eq16 x y) => (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
-(Eq8 x y) => (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
+(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq64F ...) => (FEQD ...)
(Eq32F ...) => (FEQS ...)
(NeqPtr x y) => (SNEZ (SUB <x.Type> x y))
(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
(Neq32 x y) => (SNEZ (SUBW <x.Type> x y))
-(Neq16 x y) => (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
-(Neq8 x y) => (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
+(Neq16 x y) => (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Neq8 x y) => (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Neq64F ...) => (FNED ...)
(Neq32F ...) => (FNES ...)
@@ -291,8 +283,8 @@
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
// knows what variables are being read/written by the ops.
@@ -368,6 +360,13 @@
(Zero [4] ptr mem) => (MOVWstore ptr (MOVWconst) mem)
(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst) mem)
+// Medium zeroing uses a Duff's device
+// 8 and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%8 == 0 && s >= 16 && s <= 8*128
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [8 * (128 - s/8)] ptr mem)
+
// Generic zeroing uses a loop
(Zero [s] {t} ptr mem) =>
(LoweredZero [t.Alignment()]
@@ -403,6 +402,13 @@
(Move [4] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
+// Medium move uses a Duff's device
+// 16 and 128 are magic constants, see runtime/mkduff.go
+(Move [s] {t} dst src mem)
+ && s%8 == 0 && s >= 16 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+
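The auxint in these DUFFZERO/DUFFCOPY rules is a byte offset into the Duff's device body: entering later skips leading iterations, so exactly s/8 stores or copies execute. A hypothetical helper mirroring the arithmetic (8 and 16 are the per-iteration code sizes emitted by runtime/mkduff.go for riscv64):

```go
package main

import "fmt"

// duffEntry models the auxint above for a Duff's device body with 128
// iterations of codeBytes bytes each, processing s bytes in 8-byte units.
// Hypothetical helper, for illustration only.
func duffEntry(s, codeBytes int64) int64 {
	const iters = 128
	return codeBytes * (iters - s/8)
}

func main() {
	fmt.Println(duffEntry(16, 8))     // DUFFZERO entry for a 16-byte zero: 8*126
	fmt.Println(duffEntry(8*128, 16)) // DUFFCOPY entry 0: run the whole body
}
```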
// Generic move uses a loop
(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
(LoweredMove [t.Alignment()]
@@ -501,6 +507,65 @@
(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+// Avoid sign/zero extension after properly typed load.
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
+
+// Fold double extensions.
+(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
+
+// Do not extend before store.
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// Replace extend after load with alternate load where possible.
+(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
+(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
+(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
+(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+
+// If a register move has only 1 use, just use the same register without emitting an instruction.
+// MOVDnop does not emit an instruction; it exists only to ensure the type.
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+
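At the Go level these MOV*reg/MOV*Ureg conversions are just int64(int8(x)), int64(uint8(x)), and so on, which is why an extension is redundant after a typed load of the same or narrower width, or after another extension that already fixed the high bits. For instance:

```go
package main

import "fmt"

func main() {
	x := uint64(0x1F5)
	fmt.Println(int64(int8(uint8(x)))) // MOVBreg:  sign-extend byte -> -11
	fmt.Println(int64(uint8(x)))       // MOVBUreg: zero-extend byte -> 245

	// Double extension folds: the inner conversion already set the high
	// bits, so the outer one is a plain register move (MOVDreg/MOVDnop).
	y := int64(int8(42))
	fmt.Println(int64(int32(y)) == y)
}
```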
// Fold constant into immediate instructions where possible.
(ADD (MOVBconst [val]) x) => (ADDI [int64(val)] x)
(ADD (MOVHconst [val]) x) => (ADDI [int64(val)] x)
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
index b06b86075e..f64319230b 100644
--- a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
@@ -24,10 +24,11 @@ import (
// L = 64 bit int, used when the opcode starts with F
const (
- riscv64REG_G = 4
+ riscv64REG_G = 27
riscv64REG_CTXT = 20
riscv64REG_LR = 1
riscv64REG_SP = 2
+ riscv64REG_TP = 4
riscv64REG_TMP = 31
riscv64REG_ZERO = 0
)
@@ -78,8 +79,8 @@ func init() {
// Add general purpose registers to gpMask.
switch r {
- // ZERO, and TMP are not in any gp mask.
- case riscv64REG_ZERO, riscv64REG_TMP:
+ // ZERO, TP and TMP are not in any gp mask.
+ case riscv64REG_ZERO, riscv64REG_TP, riscv64REG_TMP:
case riscv64REG_G:
gpgMask |= mask
gpspsbgMask |= mask
@@ -192,6 +193,17 @@ func init() {
{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
{name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+ // Conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOV"}, // move from arg0
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+
+ {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register
+
// Shift ops
{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63)
{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed
@@ -228,6 +240,44 @@ func init() {
{name: "CALLclosure", argLength: 3, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+ // duffzero
+ // arg0 = address of memory to zero (in X10, changed as side effect)
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // X1 (link register) changed because of function call
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X10"]},
+ clobbers: regNamed["X1"] | regNamed["X10"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in X11, changed as side effect)
+ // arg1 = address of src memory (in X10, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // X1 (link register) changed because of function call
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X11"], regNamed["X10"]},
+ clobbers: regNamed["X1"] | regNamed["X10"] | regNamed["X11"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
// Generic moves and zeros
// general unaligned zeroing
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index e564f638d3..2d6f091a4e 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -198,6 +198,9 @@
(RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
mem)
+(AtomicAnd32 ...) => (LAN ...)
+(AtomicOr32 ...) => (LAO ...)
+
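LAN and LAO are s390x's single-instruction interlocked AND/OR, so the 32-bit atomics need no compare-and-swap loop here. A sketch of the operation being lowered, written against the And/Or methods that later Go releases added to sync/atomic (an assumption about the caller-visible semantics, not an API introduced by this change):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var flags atomic.Uint32
	flags.Or(1 << 3)           // AtomicOr32  -> LAO: set a flag bit
	flags.And(^uint32(1 << 3)) // AtomicAnd32 -> LAN: clear it again
	fmt.Println(flags.Load())  // 0
}
```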
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) ...) => (MOVBreg ...)
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
index 417b33cf91..728cfb5508 100644
--- a/src/cmd/compile/internal/ssa/gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -547,8 +547,10 @@ func init() {
// Atomic bitwise operations.
// Note: 'floor' operations round the pointer down to the nearest word boundary
// which reflects how they are used in the runtime.
- {name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem.
+ {name: "LAN", argLength: 3, reg: gpstore, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 &= arg1. arg2 = mem.
{name: "LANfloor", argLength: 3, reg: gpstorelab, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) &= arg1. arg2 = mem.
+ {name: "LAO", argLength: 3, reg: gpstore, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 |= arg1. arg2 = mem.
+ {name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem.
// Compare and swap.
// arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules
index 4f9e863f90..07607960fa 100644
--- a/src/cmd/compile/internal/ssa/gen/dec64.rules
+++ b/src/cmd/compile/internal/ssa/gen/dec64.rules
@@ -9,7 +9,6 @@
(Int64Hi (Int64Make hi _)) => hi
(Int64Lo (Int64Make _ lo)) => lo
-
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() =>
(Int64Make
(Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem)
@@ -143,6 +142,10 @@
(Trunc64to32 (Int64Make _ lo)) => lo
(Trunc64to16 (Int64Make _ lo)) => (Trunc32to16 lo)
(Trunc64to8 (Int64Make _ lo)) => (Trunc32to8 lo)
+// Most general
+(Trunc64to32 x) => (Int64Lo x)
+(Trunc64to16 x) => (Trunc32to16 (Int64Lo x))
+(Trunc64to8 x) => (Trunc32to8 (Int64Lo x))
(Lsh32x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
(Rsh32x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask x)
@@ -175,156 +178,174 @@
// turn x64 non-constant shifts to x32 shifts
// if high 32-bit of the shift is nonzero, make a huge shift
(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
- (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+
+// Most general
+(Lsh64x64 x y) => (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh64x64 x y) => (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh64Ux64 x y) => (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh32x64 x y) => (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh32x64 x y) => (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh32Ux64 x y) => (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh16x64 x y) => (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh16x64 x y) => (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh16Ux64 x y) => (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh8x64 x y) => (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh8x64 x y) => (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+
+// Clean up constants a little
+(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c == 0 => y
+(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c != 0 => (Const32 <typ.UInt32> [-1])
// 64x left shift
// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result in 0
// result.lo = lo<<s
-(Lsh64x32 (Int64Make hi lo) s) =>
+(Lsh64x32 x s) =>
(Int64Make
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Lsh32x32 <typ.UInt32> hi s)
+ (Lsh32x32 <typ.UInt32> (Int64Hi x) s)
(Rsh32Ux32 <typ.UInt32>
- lo
+ (Int64Lo x)
(Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
(Lsh32x32 <typ.UInt32>
- lo
+ (Int64Lo x)
(Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))))
- (Lsh32x32 <typ.UInt32> lo s))
-(Lsh64x16 (Int64Make hi lo) s) =>
+ (Lsh32x32 <typ.UInt32> (Int64Lo x) s))
+(Lsh64x16 x s) =>
(Int64Make
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Lsh32x16 <typ.UInt32> hi s)
+ (Lsh32x16 <typ.UInt32> (Int64Hi x) s)
(Rsh32Ux16 <typ.UInt32>
- lo
+ (Int64Lo x)
(Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
(Lsh32x16 <typ.UInt32>
- lo
+ (Int64Lo x)
(Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))))
- (Lsh32x16 <typ.UInt32> lo s))
-(Lsh64x8 (Int64Make hi lo) s) =>
+ (Lsh32x16 <typ.UInt32> (Int64Lo x) s))
+(Lsh64x8 x s) =>
(Int64Make
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Lsh32x8 <typ.UInt32> hi s)
+ (Lsh32x8 <typ.UInt32> (Int64Hi x) s)
(Rsh32Ux8 <typ.UInt32>
- lo
+ (Int64Lo x)
(Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
(Lsh32x8 <typ.UInt32>
- lo
+ (Int64Lo x)
(Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))))
- (Lsh32x8 <typ.UInt32> lo s))
+ (Lsh32x8 <typ.UInt32> (Int64Lo x) s))
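The decomposition in the comments works because these generic shift ops, like Go's, yield 0 once the unsigned shift count reaches the operand width, so at most one of the lo>>(32-s) and lo<<(s-32) terms is nonzero. A cross-check of the left-shift formula against a real 64-bit shift (illustration only):

```go
package main

import "fmt"

// lsh64 applies the two-halves formula: result.hi = hi<<s | lo>>(32-s) | lo<<(s-32),
// result.lo = lo<<s. Out-of-range counts wrap the uint operand, and shifting
// a uint32 by >= 32 yields 0 in Go, matching the rules' shift semantics.
func lsh64(hi, lo uint32, s uint) (uint32, uint32) {
	return hi<<s | lo>>(32-s) | lo<<(s-32), lo << s
}

func main() {
	hi, lo := uint32(0x89ABCDEF), uint32(0x01234567)
	for s := uint(0); s < 64; s++ {
		rhi, rlo := lsh64(hi, lo, s)
		if uint64(rhi)<<32|uint64(rlo) != (uint64(hi)<<32|uint64(lo))<<s {
			fmt.Println("mismatch at shift", s)
			return
		}
	}
	fmt.Println("ok")
}
```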
// 64x unsigned right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result in 0
-(Rsh64Ux32 (Int64Make hi lo) s) =>
+(Rsh64Ux32 x s) =>
(Int64Make
- (Rsh32Ux32 <typ.UInt32> hi s)
+ (Rsh32Ux32 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Rsh32Ux32 <typ.UInt32> lo s)
+ (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
(Lsh32x32 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
(Rsh32Ux32 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
-(Rsh64Ux16 (Int64Make hi lo) s) =>
+(Rsh64Ux16 x s) =>
(Int64Make
- (Rsh32Ux16 <typ.UInt32> hi s)
+ (Rsh32Ux16 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Rsh32Ux16 <typ.UInt32> lo s)
+ (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
(Lsh32x16 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
(Rsh32Ux16 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
-(Rsh64Ux8 (Int64Make hi lo) s) =>
+(Rsh64Ux8 x s) =>
(Int64Make
- (Rsh32Ux8 <typ.UInt32> hi s)
+ (Rsh32Ux8 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Rsh32Ux8 <typ.UInt32> lo s)
+ (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
(Lsh32x8 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
(Rsh32Ux8 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
// 64x signed right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result in 0/-1
-(Rsh64x32 (Int64Make hi lo) s) =>
+(Rsh64x32 x s) =>
(Int64Make
- (Rsh32x32 <typ.UInt32> hi s)
+ (Rsh32x32 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Rsh32Ux32 <typ.UInt32> lo s)
+ (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
(Lsh32x32 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
(And32 <typ.UInt32>
(Rsh32x32 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))
(Zeromask
(Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
-(Rsh64x16 (Int64Make hi lo) s) =>
+(Rsh64x16 x s) =>
(Int64Make
- (Rsh32x16 <typ.UInt32> hi s)
+ (Rsh32x16 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Rsh32Ux16 <typ.UInt32> lo s)
+ (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
(Lsh32x16 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
(And32 <typ.UInt32>
(Rsh32x16 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))
(Zeromask
(ZeroExt16to32
(Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
-(Rsh64x8 (Int64Make hi lo) s) =>
+(Rsh64x8 x s) =>
(Int64Make
- (Rsh32x8 <typ.UInt32> hi s)
+ (Rsh32x8 <typ.UInt32> (Int64Hi x) s)
(Or32 <typ.UInt32>
(Or32 <typ.UInt32>
- (Rsh32Ux8 <typ.UInt32> lo s)
+ (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
(Lsh32x8 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
(And32 <typ.UInt32>
(Rsh32x8 <typ.UInt32>
- hi
+ (Int64Hi x)
(Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))
(Zeromask
(ZeroExt8to32
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 39f8cc8889..4351ef5bdd 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -1961,6 +1961,31 @@
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
=> (Invalid)
+// for late-expanded calls
+(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+ && isConstZero(x)
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+ && isConstZero(x)
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(NilCheck (SelectN [0] call:(StaticLECall _ _)) (SelectN [1] call))
+ && isSameCall(call.Aux, "runtime.newobject")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => (Invalid)
+
+(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) (SelectN [1] call))
+ && isSameCall(call.Aux, "runtime.newobject")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => (Invalid)
+
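These rules encode the invariant that runtime.newobject returns zeroed, non-nil memory, now matched in its late-expanded (StaticLECall/SelectN) form as well: a zero store into a fresh allocation and any nil check of its result are both dropped. The Go-level pattern being cleaned up looks like:

```go
package main

import "fmt"

type buf struct{ b [64]byte }

func fresh() *buf {
	p := new(buf) // runtime.newobject: already zeroed, never nil
	*p = buf{}    // redundant Zero / store of zero: removed by the rules above
	return p      // nil checks on p are removed as well
}

func main() { fmt.Println(fresh().b[0]) }
```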
// Evaluate constant address comparisons.
(EqPtr x x) => (ConstBool [true])
(NeqPtr x x) => (ConstBool [false])
@@ -2017,6 +2042,17 @@
&& clobber(s1, s2, s3)
=> (Move {t.Elem()} [int64(sz)] dst src mem)
+// Inline small or disjoint runtime.memmove calls with constant length.
+// See the comment in op Move in genericOps.go for discussion of the type.
+(SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem))
+ && sz >= 0
+ && call.Uses == 1 // this will exclude all calls with results
+ && isSameCall(sym, "runtime.memmove")
+ && dst.Type.IsPtr() // avoids TUINTPTR, see issue 30061
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(call)
+ => (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
+
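The user-visible effect is the same as for the pre-expansion memmove rule: a single-use runtime.memmove call with a small constant length becomes a plain Move. A copy with a constant length is a typical candidate (whether it fires depends on isInlinableMemmove for the target):

```go
package main

import "fmt"

// head copies a fixed 8-byte prefix; the constant length reaches
// runtime.memmove as a Const64, making the call eligible for the
// Move rewrite above.
func head(dst, src []byte) {
	copy(dst[:8], src[:8])
}

func main() {
	dst := make([]byte, 8)
	head(dst, []byte("abcdefgh"))
	fmt.Println(string(dst))
}
```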
// De-virtualize interface calls into static calls.
// Note that (ITab (IMake)) doesn't get
// rewritten until after the first opt pass,
@@ -2024,6 +2060,13 @@
(InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, auxCall, itab, off) != nil =>
(StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem)
+// De-virtualize late-expanded interface calls into late-expanded static calls.
+// Note that (ITab (IMake)) doesn't get rewritten until after the first opt pass,
+// so this rule should trigger reliably.
+// devirtLECall removes the first argument, adds the devirtualized symbol to the AuxCall, and changes the opcode
+(InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___) && devirtLESym(v, auxCall, itab, off) != nil => devirtLECall(v, devirtLESym(v, auxCall, itab, off))
+
// Move and Zero optimizations.
// Move source and destination may overlap.
@@ -2404,6 +2447,7 @@
(Store {t5} (OffPtr <tt5> [o5] dst) d4
(Zero {t1} [n] dst mem)))))
+// TODO this does not fire before call expansion; is that acceptable?
(StaticCall {sym} x) && needRaceCleanup(sym, v) => x
// Collapse moving A -> B -> C into just A -> C.
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 95edff4c8c..23bd4af2cd 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -389,10 +389,12 @@ var genericOps = []opData{
// TODO(josharian): ClosureCall and InterCall should have Int32 aux
// to match StaticCall's 32 bit arg size limit.
// TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff?
- {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory.
- {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory.
- {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory.
- {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory.
+ {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory.
+ {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory.
+ {name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "InterLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded interface call. arg0=code pointer, arg1..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
// Conversions: signed extensions, zero (unsigned) extensions, truncations
{name: "SignExt8to16", argLength: 1, typ: "Int16"},
@@ -539,20 +541,22 @@ var genericOps = []opData{
{name: "SelectN", argLength: 1, aux: "Int64"}, // arg0=tuple, auxint=field index. Returns the auxint'th member.
{name: "SelectNAddr", argLength: 1, aux: "Int64"}, // arg0=tuple, auxint=field index. Returns the address of auxint'th member. Used for un-SSA-able result types.
- // Atomic operations used for semantically inlining runtime/internal/atomic.
- // Atomic loads return a new memory so that the loads are properly ordered
- // with respect to other loads and stores.
- // TODO: use for sync/atomic at some point.
+ // Atomic operations used for semantically inlining sync/atomic and
+ // runtime/internal/atomic. Atomic loads return a new memory so that
+ // the loads are properly ordered with respect to other loads and
+ // stores.
{name: "AtomicLoad8", argLength: 2, typ: "(UInt8,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicLoadAcq64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
{name: "AtomicStore8", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
{name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicStoreRel64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
{name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
{name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
@@ -561,7 +565,9 @@ var genericOps = []opData{
{name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
{name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, reports whether store happens and new memory.
{name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
{name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
// Atomic operation variants
// These variants have the same semantics as above atomic operations.
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index be51a7c5f8..120ccbbdb3 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -35,8 +35,7 @@ import (
)
// rule syntax:
-// sexpr [&& extra conditions] -> [@block] sexpr (untyped)
-// sexpr [&& extra conditions] => [@block] sexpr (typed)
+// sexpr [&& extra conditions] => [@block] sexpr
//
// sexpr are s-expressions (lisp-like parenthesized groupings)
// sexpr ::= [variable:](opcode sexpr*)
@@ -50,8 +49,12 @@ import (
// variable ::= some token
// opcode ::= one of the opcodes from the *Ops.go files
+// Special rules: a trailing ellipsis "..." in the outermost sexpr must match on both sides of a rule.
+// A trailing triple underscore "___" in the outermost match sexpr indicates the presence of
+// extra, ignored args that need not appear in the replacement.
+
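// For example (hypothetical ops):
//	(Foo ...) => (Bar ...)        // ellipsis: Bar keeps Foo's aux, auxint, and args
//	(Baz x ___) && cond(x) => x   // "___": match x, ignore any further args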
// extra conditions is just a chunk of Go that evaluates to a boolean. It may use
-// variables declared in the matching sexpr. The variable "v" is predefined to be
+// variables declared in the matching sexpr. The variable "v" is predefined to be
// the value matched by the entire rule.
// If multiple rules match, the first one in file order is selected.
@@ -75,14 +78,8 @@ func normalizeSpaces(s string) string {
}
// parse returns the matching part of the rule, additional conditions, and the result.
-// parse also reports whether the generated code should use strongly typed aux and auxint fields.
-func (r Rule) parse() (match, cond, result string, typed bool) {
- arrow := "->"
- if strings.Contains(r.Rule, "=>") {
- arrow = "=>"
- typed = true
- }
- s := strings.Split(r.Rule, arrow)
+func (r Rule) parse() (match, cond, result string) {
+ s := strings.Split(r.Rule, "=>")
match = normalizeSpaces(s[0])
result = normalizeSpaces(s[1])
cond = ""
@@ -90,7 +87,7 @@ func (r Rule) parse() (match, cond, result string, typed bool) {
cond = normalizeSpaces(match[i+2:])
match = normalizeSpaces(match[:i])
}
- return match, cond, result, typed
+ return match, cond, result
}
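// A minimal sketch of what parse now returns (assumed rule text):
//
//	r := Rule{Rule: "(Add8 x y) && x == y => (Lsh8x64 x (Const64 [1]))"}
//	match, cond, result := r.parse()
//	// match  == "(Add8 x y)"
//	// cond   == "x == y"
//	// result == "(Lsh8x64 x (Const64 [1]))"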
func genRules(arch arch) { genRulesSuffix(arch, "") }
@@ -116,7 +113,7 @@ func genRulesSuffix(arch arch, suff string) {
scanner := bufio.NewScanner(text)
rule := ""
var lineno int
- var ruleLineno int // line number of "->" or "=>"
+ var ruleLineno int // line number of "=>"
for scanner.Scan() {
lineno++
line := scanner.Text()
@@ -130,13 +127,13 @@ func genRulesSuffix(arch arch, suff string) {
if rule == "" {
continue
}
- if !strings.Contains(rule, "->") && !strings.Contains(rule, "=>") {
+ if !strings.Contains(rule, "=>") {
continue
}
if ruleLineno == 0 {
ruleLineno = lineno
}
- if strings.HasSuffix(rule, "->") || strings.HasSuffix(rule, "=>") {
+ if strings.HasSuffix(rule, "=>") {
continue // continue on the next line
}
if n := balance(rule); n > 0 {
@@ -153,7 +150,7 @@ func genRulesSuffix(arch arch, suff string) {
continue
}
// Do fancier value op matching.
- match, _, _, _ := r.parse()
+ match, _, _ := r.parse()
op, oparch, _, _, _, _ := parseValue(match, arch, loc)
opname := fmt.Sprintf("Op%s%s", oparch, op.name)
oprules[opname] = append(oprules[opname], r)
@@ -227,7 +224,7 @@ func genRulesSuffix(arch arch, suff string) {
log.Fatalf("unconditional rule %s is followed by other rules", rr.Match)
}
rr = &RuleRewrite{Loc: rule.Loc}
- rr.Match, rr.Cond, rr.Result, rr.Typed = rule.parse()
+ rr.Match, rr.Cond, rr.Result = rule.parse()
pos, _ := genMatch(rr, arch, rr.Match, fn.ArgLen >= 0)
if pos == "" {
pos = "v.Pos"
@@ -786,7 +783,6 @@ type (
Alloc int // for unique var names
Loc string // file name & line number of the original rule
CommuteDepth int // used to track depth of commute loops
- Typed bool // aux and auxint fields should be strongly typed
}
Declare struct {
Name string
@@ -840,7 +836,7 @@ func breakf(format string, a ...interface{}) *CondBreak {
func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
rr := &RuleRewrite{Loc: rule.Loc}
- rr.Match, rr.Cond, rr.Result, rr.Typed = rule.parse()
+ rr.Match, rr.Cond, rr.Result = rule.parse()
_, _, auxint, aux, s := extract(rr.Match) // remove parens, then split
// check match of control values
@@ -884,15 +880,6 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
if e.name == "" {
continue
}
- if !rr.Typed {
- if !token.IsIdentifier(e.name) || rr.declared(e.name) {
- // code or variable
- rr.add(breakf("b.%s != %s", e.field, e.name))
- } else {
- rr.add(declf(e.name, "b.%s", e.field))
- }
- continue
- }
if e.dclType == "" {
log.Fatalf("op %s has no declared type for %s", data.name, e.field)
@@ -961,20 +948,12 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
}
if auxint != "" {
- if rr.Typed {
- // Make sure auxint value has the right type.
- rr.add(stmtf("b.AuxInt = %sToAuxInt(%s)", unTitle(outdata.auxIntType()), auxint))
- } else {
- rr.add(stmtf("b.AuxInt = %s", auxint))
- }
+ // Make sure auxint value has the right type.
+ rr.add(stmtf("b.AuxInt = %sToAuxInt(%s)", unTitle(outdata.auxIntType()), auxint))
}
if aux != "" {
- if rr.Typed {
- // Make sure aux value has the right type.
- rr.add(stmtf("b.Aux = %sToAux(%s)", unTitle(outdata.auxType()), aux))
- } else {
- rr.add(stmtf("b.Aux = %s", aux))
- }
+ // Make sure aux value has the right type.
+ rr.add(stmtf("b.Aux = %sToAux(%s)", unTitle(outdata.auxType()), aux))
}
succChanged := false
@@ -1019,6 +998,19 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
pos = v + ".Pos"
}
+	// If the last argument is ___, it means "don't care about any trailing arguments".
+	// The intended use is for rewrites that are too tricky to express in the existing pattern language.
+	// Do the length check early, because a long pattern fed a short (ultimately non-matching) input
+	// would otherwise cause an indexing error during pattern matching.
+ if op.argLength == -1 {
+ l := len(args)
+ if l == 0 || args[l-1] != "___" {
+ rr.add(breakf("len(%s.Args) != %d", v, l))
+ } else if l > 1 && args[l-1] == "___" {
+ rr.add(breakf("len(%s.Args) < %d", v, l-1))
+ }
+ }
+
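// Sketch of the checks this emits for a variadic op (two fixed args assumed):
//
//	exact pattern  (Op a b):     if len(v.Args) != 2 { break }
//	trailing ___   (Op a b ___): if len(v.Args) < 2 { break }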
for _, e := range []struct {
name, field, dclType string
}{
@@ -1029,15 +1021,6 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
if e.name == "" {
continue
}
- if !rr.Typed {
- if !token.IsIdentifier(e.name) || rr.declared(e.name) {
- // code or variable
- rr.add(breakf("%s.%s != %s", v, e.field, e.name))
- } else {
- rr.add(declf(e.name, "%s.%s", v, e.field))
- }
- continue
- }
if e.dclType == "" {
log.Fatalf("op %s has no declared type for %s", op.name, e.field)
@@ -1159,9 +1142,6 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
}
}
- if op.argLength == -1 {
- rr.add(breakf("len(%s.Args) != %d", v, len(args)))
- }
return pos, checkOp
}
@@ -1230,20 +1210,12 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
}
if auxint != "" {
- if rr.Typed {
- // Make sure auxint value has the right type.
- rr.add(stmtf("%s.AuxInt = %sToAuxInt(%s)", v, unTitle(op.auxIntType()), auxint))
- } else {
- rr.add(stmtf("%s.AuxInt = %s", v, auxint))
- }
+ // Make sure auxint value has the right type.
+ rr.add(stmtf("%s.AuxInt = %sToAuxInt(%s)", v, unTitle(op.auxIntType()), auxint))
}
if aux != "" {
- if rr.Typed {
- // Make sure aux value has the right type.
- rr.add(stmtf("%s.Aux = %sToAux(%s)", v, unTitle(op.auxType()), aux))
- } else {
- rr.add(stmtf("%s.Aux = %s", v, aux))
- }
+ // Make sure aux value has the right type.
+ rr.add(stmtf("%s.Aux = %sToAux(%s)", v, unTitle(op.auxType()), aux))
}
all := new(strings.Builder)
for i, arg := range args {
@@ -1524,7 +1496,7 @@ func excludeFromExpansion(s string, idx []int) bool {
return true
}
right := s[idx[1]:]
- if strings.Contains(left, "&&") && (strings.Contains(right, "->") || strings.Contains(right, "=>")) {
+ if strings.Contains(left, "&&") && strings.Contains(right, "=>") {
// Inside && conditions.
return true
}
@@ -1626,7 +1598,6 @@ func normalizeWhitespace(x string) string {
x = strings.Replace(x, " )", ")", -1)
x = strings.Replace(x, "[ ", "[", -1)
x = strings.Replace(x, " ]", "]", -1)
- x = strings.Replace(x, ")->", ") ->", -1)
x = strings.Replace(x, ")=>", ") =>", -1)
return x
}
@@ -1683,7 +1654,7 @@ func parseEllipsisRules(rules []Rule, arch arch) (newop string, ok bool) {
return "", false
}
rule := rules[0]
- match, cond, result, _ := rule.parse()
+ match, cond, result := rule.parse()
if cond != "" || !isEllipsisValue(match) || !isEllipsisValue(result) {
if strings.Contains(rule.Rule, "...") {
log.Fatalf("%s: found ellipsis in non-ellipsis rule", rule.Loc)
@@ -1708,7 +1679,7 @@ func isEllipsisValue(s string) bool {
}
func checkEllipsisRuleCandidate(rule Rule, arch arch) {
- match, cond, result, _ := rule.parse()
+ match, cond, result := rule.parse()
if cond != "" {
return
}
@@ -1718,7 +1689,7 @@ func checkEllipsisRuleCandidate(rule Rule, arch arch) {
var usingCopy string
var eop opData
if result[0] != '(' {
- // Check for (Foo x) -> x, which can be converted to (Foo ...) -> (Copy ...).
+ // Check for (Foo x) => x, which can be converted to (Foo ...) => (Copy ...).
args2 = []string{result}
usingCopy = " using Copy"
} else {
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index c781ca92cc..a9d52fa4ee 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -28,18 +28,23 @@ type HTMLWriter struct {
}
func NewHTMLWriter(path string, f *Func, cfgMask string) *HTMLWriter {
+ path = strings.Replace(path, "/", string(filepath.Separator), -1)
out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
f.Fatalf("%v", err)
}
- pwd, err := os.Getwd()
- if err != nil {
- f.Fatalf("%v", err)
+ reportPath := path
+ if !filepath.IsAbs(reportPath) {
+ pwd, err := os.Getwd()
+ if err != nil {
+ f.Fatalf("%v", err)
+ }
+ reportPath = filepath.Join(pwd, path)
}
html := HTMLWriter{
w: out,
Func: f,
- path: filepath.Join(pwd, path),
+ path: reportPath,
dot: newDotWriter(cfgMask),
}
html.start()
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 9b45dd53c7..6f029a421e 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -127,6 +127,17 @@ func (a *AuxCall) NResults() int64 {
return int64(len(a.results))
}
+// LateExpansionResultType returns the result type (including trailing mem)
+// for a call that will be expanded later in the SSA phase.
+func (a *AuxCall) LateExpansionResultType() *types.Type {
+ var tys []*types.Type
+ for i := int64(0); i < a.NResults(); i++ {
+ tys = append(tys, a.TypeOfResult(i))
+ }
+ tys = append(tys, types.TypeMem)
+ return types.NewResults(tys)
+}
+
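// A hedged usage sketch (not part of this diff): a later expansion pass can type a
// late-expanded call with this tuple and then select its memory result:
//
//	aux := call.Aux.(*AuxCall)
//	call.Type = aux.LateExpansionResultType()        // (res0, ..., resN-1, mem)
//	mem := b.NewValue1I(pos, OpSelectN, types.TypeMem, aux.NResults(), call)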
// NArgs returns the number of arguments
func (a *AuxCall) NArgs() int64 {
return int64(len(a.args))
@@ -255,9 +266,6 @@ func (x ValAndOff) Val8() int8 { return int8(int64(x) >> 32) }
func (x ValAndOff) Off() int64 { return int64(int32(x)) }
func (x ValAndOff) Off32() int32 { return int32(x) }
-func (x ValAndOff) Int64() int64 {
- return int64(x)
-}
func (x ValAndOff) String() string {
return fmt.Sprintf("val=%d,off=%d", x.Val(), x.Off())
}
@@ -286,17 +294,9 @@ func validValAndOff(val, off int64) bool {
return true
}
-// makeValAndOff encodes a ValAndOff into an int64 suitable for storing in an AuxInt field.
-func makeValAndOff(val, off int64) int64 {
- if !validValAndOff(val, off) {
- panic("invalid makeValAndOff")
- }
- return ValAndOff(val<<32 + int64(uint32(off))).Int64()
-}
func makeValAndOff32(val, off int32) ValAndOff {
return ValAndOff(int64(val)<<32 + int64(uint32(off)))
}
-
func makeValAndOff64(val, off int64) ValAndOff {
if !validValAndOff(val, off) {
panic("invalid makeValAndOff64")
@@ -304,35 +304,26 @@ func makeValAndOff64(val, off int64) ValAndOff {
return ValAndOff(val<<32 + int64(uint32(off)))
}
-func (x ValAndOff) canAdd(off int64) bool {
- newoff := x.Off() + off
- return newoff == int64(int32(newoff))
-}
-
func (x ValAndOff) canAdd32(off int32) bool {
newoff := x.Off() + int64(off)
return newoff == int64(int32(newoff))
}
-
-func (x ValAndOff) add(off int64) int64 {
- if !x.canAdd(off) {
- panic("invalid ValAndOff.add")
- }
- return makeValAndOff(x.Val(), x.Off()+off)
+func (x ValAndOff) canAdd64(off int64) bool {
+ newoff := x.Off() + off
+ return newoff == int64(int32(newoff))
}
func (x ValAndOff) addOffset32(off int32) ValAndOff {
if !x.canAdd32(off) {
- panic("invalid ValAndOff.add")
+ panic("invalid ValAndOff.addOffset32")
}
- return ValAndOff(makeValAndOff(x.Val(), x.Off()+int64(off)))
+ return makeValAndOff64(x.Val(), x.Off()+int64(off))
}
-
func (x ValAndOff) addOffset64(off int64) ValAndOff {
- if !x.canAdd(off) {
- panic("invalid ValAndOff.add")
+ if !x.canAdd64(off) {
+ panic("invalid ValAndOff.addOffset64")
}
- return ValAndOff(makeValAndOff(x.Val(), x.Off()+off))
+ return makeValAndOff64(x.Val(), x.Off()+off)
}
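// Worked example (both values assumed in range): ValAndOff packs val into the high
// 32 bits and off into the low 32, so
//
//	x := makeValAndOff64(3, 8) // encodes 3<<32 | 8
//	x.Val() == 3 && x.Off() == 8
//	x.addOffset64(4).Off() == 12 // val is unchanged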
// int128 is a type that stores a 128-bit constant.
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 1fe00c7026..96aa3adedd 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -536,7 +536,6 @@ const (
Op386FlagLT_UGT
Op386FlagGT_UGT
Op386FlagGT_ULT
- Op386FCHS
Op386MOVSSconst1
Op386MOVSDconst1
Op386MOVSSconst2
@@ -1035,7 +1034,9 @@ const (
OpAMD64CMPXCHGLlock
OpAMD64CMPXCHGQlock
OpAMD64ANDBlock
+ OpAMD64ANDLlock
OpAMD64ORBlock
+ OpAMD64ORLlock
OpARMADD
OpARMADDconst
@@ -1587,7 +1588,9 @@ const (
OpARM64LoweredAtomicCas64
OpARM64LoweredAtomicCas32
OpARM64LoweredAtomicAnd8
+ OpARM64LoweredAtomicAnd32
OpARM64LoweredAtomicOr8
+ OpARM64LoweredAtomicOr32
OpARM64LoweredWB
OpARM64LoweredPanicBoundsA
OpARM64LoweredPanicBoundsB
@@ -1833,6 +1836,8 @@ const (
OpPPC64FSUBS
OpPPC64MULLD
OpPPC64MULLW
+ OpPPC64MULLDconst
+ OpPPC64MULLWconst
OpPPC64MADDLD
OpPPC64MULHD
OpPPC64MULHW
@@ -1866,6 +1871,9 @@ const (
OpPPC64ROTLconst
OpPPC64ROTLWconst
OpPPC64EXTSWSLconst
+ OpPPC64RLWINM
+ OpPPC64RLWNM
+ OpPPC64RLWMI
OpPPC64CNTLZD
OpPPC64CNTLZW
OpPPC64CNTTZD
@@ -2021,7 +2029,9 @@ const (
OpPPC64LoweredAtomicCas64
OpPPC64LoweredAtomicCas32
OpPPC64LoweredAtomicAnd8
+ OpPPC64LoweredAtomicAnd32
OpPPC64LoweredAtomicOr8
+ OpPPC64LoweredAtomicOr32
OpPPC64LoweredWB
OpPPC64LoweredPanicBoundsA
OpPPC64LoweredPanicBoundsB
@@ -2070,6 +2080,14 @@ const (
OpRISCV64MOVHstorezero
OpRISCV64MOVWstorezero
OpRISCV64MOVDstorezero
+ OpRISCV64MOVBreg
+ OpRISCV64MOVHreg
+ OpRISCV64MOVWreg
+ OpRISCV64MOVDreg
+ OpRISCV64MOVBUreg
+ OpRISCV64MOVHUreg
+ OpRISCV64MOVWUreg
+ OpRISCV64MOVDnop
OpRISCV64SLL
OpRISCV64SRA
OpRISCV64SRL
@@ -2093,6 +2111,8 @@ const (
OpRISCV64CALLstatic
OpRISCV64CALLclosure
OpRISCV64CALLinter
+ OpRISCV64DUFFZERO
+ OpRISCV64DUFFCOPY
OpRISCV64LoweredZero
OpRISCV64LoweredMove
OpRISCV64LoweredAtomicLoad8
@@ -2367,8 +2387,10 @@ const (
OpS390XLAAG
OpS390XAddTupleFirst32
OpS390XAddTupleFirst64
- OpS390XLAOfloor
+ OpS390XLAN
OpS390XLANfloor
+ OpS390XLAO
+ OpS390XLAOfloor
OpS390XLoweredAtomicCas32
OpS390XLoweredAtomicCas64
OpS390XLoweredAtomicExchange32
@@ -2732,7 +2754,9 @@ const (
OpClosureCall
OpStaticCall
OpInterCall
+ OpClosureLECall
OpStaticLECall
+ OpInterLECall
OpSignExt8to16
OpSignExt8to32
OpSignExt8to64
@@ -2836,11 +2860,13 @@ const (
OpAtomicLoad64
OpAtomicLoadPtr
OpAtomicLoadAcq32
+ OpAtomicLoadAcq64
OpAtomicStore8
OpAtomicStore32
OpAtomicStore64
OpAtomicStorePtrNoWB
OpAtomicStoreRel32
+ OpAtomicStoreRel64
OpAtomicExchange32
OpAtomicExchange64
OpAtomicAdd32
@@ -2849,7 +2875,9 @@ const (
OpAtomicCompareAndSwap64
OpAtomicCompareAndSwapRel32
OpAtomicAnd8
+ OpAtomicAnd32
OpAtomicOr8
+ OpAtomicOr32
OpAtomicAdd32Variant
OpAtomicAdd64Variant
OpClobber
@@ -6059,18 +6087,6 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
- name: "FCHS",
- argLen: 1,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
- },
- outputs: []outputInfo{
- {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
- },
- },
- },
- {
name: "MOVSSconst1",
auxType: auxFloat32,
argLen: 0,
@@ -13583,6 +13599,22 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ANDLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
name: "ORBlock",
auxType: auxSymOff,
argLen: 3,
@@ -13598,6 +13630,22 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "ORLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
{
name: "ADD",
@@ -21068,6 +21116,24 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "LoweredAtomicOr8",
argLen: 3,
resultNotInArgs: true,
@@ -21086,6 +21152,24 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "LoweredWB",
auxType: auxSym,
argLen: 3,
@@ -24389,6 +24473,34 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "MULLDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "MADDLD",
argLen: 3,
asm: ppc64.AMADDLD,
@@ -24865,6 +24977,51 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "RLWINM",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLWNM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWNM",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: ppc64.ARLWNM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWMI",
+ auxType: auxInt64,
+ argLen: 2,
+ resultInArg0: true,
+ asm: ppc64.ARLWMI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "CNTLZD",
argLen: 1,
clobberFlags: true,
@@ -26928,6 +27085,19 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "LoweredAtomicOr8",
argLen: 3,
faultOnNilArg0: true,
@@ -26941,6 +27111,19 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "LoweredWB",
auxType: auxSym,
argLen: 3,
@@ -27018,11 +27201,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AADD,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27033,10 +27216,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AADDI,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27047,10 +27230,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AADDIW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27060,10 +27243,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ANEG,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27073,10 +27256,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ANEGW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27086,11 +27269,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASUB,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27100,11 +27283,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASUBW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27115,11 +27298,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMUL,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27130,11 +27313,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMULW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27145,11 +27328,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMULH,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27160,11 +27343,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMULHU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27174,11 +27357,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ADIV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27188,11 +27371,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ADIVU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27202,11 +27385,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ADIVW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27216,11 +27399,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ADIVUW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27230,11 +27413,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AREM,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27244,11 +27427,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AREMU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27258,11 +27441,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AREMW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27272,11 +27455,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AREMUW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27289,10 +27472,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27304,7 +27487,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27316,7 +27499,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27328,7 +27511,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27340,7 +27523,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27353,10 +27536,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVB,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27369,10 +27552,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVH,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27385,10 +27568,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27401,10 +27584,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27417,10 +27600,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVBU,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27433,10 +27616,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVHU,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27449,10 +27632,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVWU,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27465,8 +27648,8 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVB,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27479,8 +27662,8 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVH,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27493,8 +27676,8 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27507,8 +27690,8 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27521,7 +27704,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVB,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27534,7 +27717,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVH,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27547,7 +27730,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27560,7 +27743,111 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: riscv.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: riscv.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: riscv.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27570,11 +27857,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASLL,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27584,11 +27871,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASRA,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27598,11 +27885,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASRL,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27613,10 +27900,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASLLI,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27627,10 +27914,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASRAI,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27641,10 +27928,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASRLI,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27655,11 +27942,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AXOR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27670,10 +27957,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AXORI,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27684,11 +27971,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AOR,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27699,10 +27986,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AORI,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27713,11 +28000,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.AAND,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27728,10 +28015,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AANDI,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27741,10 +28028,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ANOT,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27754,10 +28041,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASEQZ,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27767,10 +28054,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASNEZ,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27780,11 +28067,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASLT,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27795,10 +28082,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASLTI,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27808,11 +28095,11 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASLTU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27823,10 +28110,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.ASLTIU,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27836,10 +28123,10 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOV,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27849,7 +28136,7 @@ var opcodeTable = [...]opInfo{
argLen: 1,
call: true,
reg: regInfo{
- clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ clobbers: 9223372035781033972, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
{
@@ -27860,9 +28147,9 @@ var opcodeTable = [...]opInfo{
reg: regInfo{
inputs: []inputInfo{
{1, 524288}, // X20
- {0, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
- clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ clobbers: 9223372035781033972, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
{
@@ -27872,9 +28159,35 @@ var opcodeTable = [...]opInfo{
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
- clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ clobbers: 9223372035781033972, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 512}, // X10
+ },
+ clobbers: 512, // X10
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1024}, // X11
+ {1, 512}, // X10
+ },
+ clobbers: 1536, // X10 X11
},
},
{
@@ -27885,7 +28198,7 @@ var opcodeTable = [...]opInfo{
reg: regInfo{
inputs: []inputInfo{
{0, 16}, // X5
- {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
clobbers: 16, // X5
},
@@ -27900,7 +28213,7 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 16}, // X5
{1, 32}, // X6
- {2, 1073741748}, // X3 X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {2, 1006632884}, // X3 X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
clobbers: 112, // X5 X6 X7
},
@@ -27911,10 +28224,10 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27924,10 +28237,10 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27937,10 +28250,10 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -27951,8 +28264,8 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27963,8 +28276,8 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27975,8 +28288,8 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
},
},
@@ -27988,11 +28301,11 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741820}, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517630}, // SP X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28004,11 +28317,11 @@ var opcodeTable = [...]opInfo{
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741820}, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517630}, // SP X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28021,11 +28334,11 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741820}, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517630}, // SP X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28038,11 +28351,11 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741820}, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517630}, // SP X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28055,12 +28368,12 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741820}, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {2, 1073741820}, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517630}, // SP X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {2, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28073,12 +28386,12 @@ var opcodeTable = [...]opInfo{
unsafePoint: true,
reg: regInfo{
inputs: []inputInfo{
- {1, 1073741820}, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {2, 1073741820}, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
- {0, 9223372037928517630}, // SP X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {2, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28089,7 +28402,7 @@ var opcodeTable = [...]opInfo{
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28108,7 +28421,7 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
reg: regInfo{
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28118,7 +28431,7 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
reg: regInfo{
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28262,7 +28575,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AFMVSX,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -28275,7 +28588,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AFCVTSW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -28288,7 +28601,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AFCVTSL,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -28304,7 +28617,7 @@ var opcodeTable = [...]opInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28317,7 +28630,7 @@ var opcodeTable = [...]opInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28330,7 +28643,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVF,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -28346,7 +28659,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVF,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
@@ -28362,7 +28675,7 @@ var opcodeTable = [...]opInfo{
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28377,7 +28690,7 @@ var opcodeTable = [...]opInfo{
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28391,7 +28704,7 @@ var opcodeTable = [...]opInfo{
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28405,7 +28718,7 @@ var opcodeTable = [...]opInfo{
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28499,7 +28812,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AFMVDX,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -28512,7 +28825,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AFCVTDW,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -28525,7 +28838,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AFCVTDL,
reg: regInfo{
inputs: []inputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -28541,7 +28854,7 @@ var opcodeTable = [...]opInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28554,7 +28867,7 @@ var opcodeTable = [...]opInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28593,7 +28906,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVD,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
},
outputs: []outputInfo{
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
@@ -28609,7 +28922,7 @@ var opcodeTable = [...]opInfo{
asm: riscv.AMOVD,
reg: regInfo{
inputs: []inputInfo{
- {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
},
@@ -28625,7 +28938,7 @@ var opcodeTable = [...]opInfo{
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28640,7 +28953,7 @@ var opcodeTable = [...]opInfo{
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28654,7 +28967,7 @@ var opcodeTable = [...]opInfo{
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -28668,7 +28981,7 @@ var opcodeTable = [...]opInfo{
{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
},
outputs: []outputInfo{
- {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
},
},
},
@@ -31782,11 +32095,24 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
- name: "LAOfloor",
+ name: "LAN",
argLen: 3,
clobberFlags: true,
hasSideEffects: true,
- asm: s390x.ALAO,
+ asm: s390x.ALAN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LANfloor",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAN,
reg: regInfo{
inputs: []inputInfo{
{0, 2}, // R1
@@ -31796,11 +32122,24 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LANfloor",
+ name: "LAO",
argLen: 3,
clobberFlags: true,
hasSideEffects: true,
- asm: s390x.ALAN,
+ asm: s390x.ALAO,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LAOfloor",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAO,
reg: regInfo{
inputs: []inputInfo{
{0, 2}, // R1
@@ -34852,6 +35191,13 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "ClosureLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
name: "StaticLECall",
auxType: auxCallOff,
argLen: -1,
@@ -34859,6 +35205,13 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "InterLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
name: "SignExt8to16",
argLen: 1,
generic: true,
@@ -35397,6 +35750,11 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "AtomicLoadAcq64",
+ argLen: 2,
+ generic: true,
+ },
+ {
name: "AtomicStore8",
argLen: 3,
hasSideEffects: true,
@@ -35427,6 +35785,12 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "AtomicStoreRel64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
name: "AtomicExchange32",
argLen: 3,
hasSideEffects: true,
@@ -35475,12 +35839,24 @@ var opcodeTable = [...]opInfo{
generic: true,
},
{
+ name: "AtomicAnd32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
name: "AtomicOr8",
argLen: 3,
hasSideEffects: true,
generic: true,
},
{
+ name: "AtomicOr32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
name: "AtomicAdd32Variant",
argLen: 3,
hasSideEffects: true,
@@ -35885,7 +36261,7 @@ var registersRISCV64 = [...]Register{
{0, riscv.REG_X0, -1, "X0"},
{1, riscv.REGSP, -1, "SP"},
{2, riscv.REG_X3, 0, "X3"},
- {3, riscv.REGG, -1, "g"},
+ {3, riscv.REG_X4, -1, "X4"},
{4, riscv.REG_X5, 1, "X5"},
{5, riscv.REG_X6, 2, "X6"},
{6, riscv.REG_X7, 3, "X7"},
@@ -35908,10 +36284,10 @@ var registersRISCV64 = [...]Register{
{23, riscv.REG_X24, 20, "X24"},
{24, riscv.REG_X25, 21, "X25"},
{25, riscv.REG_X26, 22, "X26"},
- {26, riscv.REG_X27, 23, "X27"},
- {27, riscv.REG_X28, 24, "X28"},
- {28, riscv.REG_X29, 25, "X29"},
- {29, riscv.REG_X30, 26, "X30"},
+ {26, riscv.REGG, -1, "g"},
+ {27, riscv.REG_X28, 23, "X28"},
+ {28, riscv.REG_X29, 24, "X29"},
+ {29, riscv.REG_X30, 25, "X30"},
{30, riscv.REG_X31, -1, "X31"},
{31, riscv.REG_F0, -1, "F0"},
{32, riscv.REG_F1, -1, "F1"},
@@ -35947,7 +36323,7 @@ var registersRISCV64 = [...]Register{
{62, riscv.REG_F31, -1, "F31"},
{63, 0, -1, "SB"},
}
-var gpRegMaskRISCV64 = regMask(1073741812)
+var gpRegMaskRISCV64 = regMask(1006632948)
var fpRegMaskRISCV64 = regMask(9223372034707292160)
var specialRegMaskRISCV64 = regMask(0)
var framepointerRegRISCV64 = int8(-1)
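
The regmask constants in the table above are plain bitmasks over the registersRISCV64 numbering: bit i set means register index i is allowed. The change from 1073741812 to 1006632948 clears exactly bit 26 (the difference is 1<<26 = 67108864), the slot this diff reassigns from X27 to the g register; masks that also admit SP add bit 1 (giving 1006632950) and those admitting SB add bit 63 (giving 9223372037861408758). A minimal decoding sketch, not part of this diff:

	package main

	import "fmt"

	// setBits returns the indices of the set bits in mask; an index maps to
	// a register name via the registersRISCV64 table order.
	func setBits(mask uint64) []int {
		var idx []int
		for i := 0; i < 64; i++ {
			if mask&(1<<uint(i)) != 0 {
				idx = append(idx, i)
			}
		}
		return idx
	}

	func main() {
		oldMask, newMask := uint64(1073741812), uint64(1006632948)
		fmt.Println(setBits(oldMask ^ newMask)) // [26]: formerly X27, now g
	}
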
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 64c6aed3e7..0339b073ae 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -625,9 +625,6 @@ func (s *regAllocState) init(f *Func) {
s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
}
}
- if s.f.Config.use387 {
- s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
- }
// Linear scan register allocation can be influenced by the order in which blocks appear.
// Decouple the register allocation order from the generated block order.
@@ -1015,8 +1012,8 @@ func (s *regAllocState) regalloc(f *Func) {
// Copy phi ops into new schedule.
b.Values = append(b.Values, phis...)
- // Third pass - pick registers for phis whose inputs
- // were not in a register.
+ // Third pass - pick registers for phis whose input
+ // was not in a register in the primary predecessor.
for i, v := range phis {
if !s.values[v.ID].needReg {
continue
@@ -1024,10 +1021,25 @@ func (s *regAllocState) regalloc(f *Func) {
if phiRegs[i] != noRegister {
continue
}
- if s.f.Config.use387 && v.Type.IsFloat() {
- continue // 387 can't handle floats in registers between blocks
- }
m := s.compatRegs(v.Type) &^ phiUsed &^ s.used
+			// If one of the other inputs of v is in a register, and that register is available,
+			// select it, which can avoid some unnecessary copies.
+ for i, pe := range b.Preds {
+ if int32(i) == idx {
+ continue
+ }
+ ri := noRegister
+ for _, er := range s.endRegs[pe.b.ID] {
+ if er.v == s.orig[v.Args[i].ID] {
+ ri = er.r
+ break
+ }
+ }
+ if ri != noRegister && m>>ri&1 != 0 {
+ m = regMask(1) << ri
+ break
+ }
+ }
if m != 0 {
r := pickReg(m)
phiRegs[i] = r
@@ -1125,7 +1137,19 @@ func (s *regAllocState) regalloc(f *Func) {
}
rp, ok := s.f.getHome(v.ID).(*Register)
if !ok {
- continue
+ // If v is not assigned a register, pick a register assigned to one of v's inputs.
+ // Hopefully v will get assigned that register later.
+ // If the inputs have allocated register information, add it to desired,
+ // which may reduce spill or copy operations when the register is available.
+ for _, a := range v.Args {
+ rp, ok = s.f.getHome(a.ID).(*Register)
+ if ok {
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
}
desired.add(v.Args[pidx].ID, register(rp.num))
}
@@ -1528,11 +1552,6 @@ func (s *regAllocState) regalloc(f *Func) {
s.freeUseRecords = u
}
- // Spill any values that can't live across basic block boundaries.
- if s.f.Config.use387 {
- s.freeRegs(s.f.Config.fpRegMask)
- }
-
// If we are approaching a merge point and we are the primary
// predecessor of it, find live values that we use soon after
// the merge point and promote them to registers now.
@@ -1562,10 +1581,20 @@ func (s *regAllocState) regalloc(f *Func) {
continue
}
v := s.orig[vid]
- if s.f.Config.use387 && v.Type.IsFloat() {
- continue // 387 can't handle floats in registers between blocks
- }
m := s.compatRegs(v.Type) &^ s.used
+			// Use the desired register if available.
+ outerloop:
+ for _, e := range desired.entries {
+ if e.ID != v.ID {
+ continue
+ }
+ for _, r := range e.regs {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ break outerloop
+ }
+ }
+ }
if m&^desired.avoid != 0 {
m &^= desired.avoid
}
@@ -1627,7 +1656,9 @@ func (s *regAllocState) regalloc(f *Func) {
// we'll rematerialize during the merge.
continue
}
- //fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b)
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b)
+ }
spill := s.makeSpill(s.orig[e.ID], b)
s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
}
@@ -2498,7 +2529,7 @@ func (s *regAllocState) computeLive() {
for _, b := range f.Blocks {
fmt.Printf(" %s:", b)
for _, x := range s.live[b.ID] {
- fmt.Printf(" v%d", x.ID)
+ fmt.Printf(" v%d(%d)", x.ID, x.dist)
for _, e := range s.desired[b.ID].entries {
if e.ID != x.ID {
continue
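
The two regalloc hunks above that consult desired registers share one pattern: start from an availability mask m, test whether a candidate register r is still free with m>>r&1 != 0, and on a hit narrow m to the single bit regMask(1) << r before pickReg chooses. A sketch of that narrowing step under assumed definitions (regMask, register, and noRegister mirror this package's declarations, but the helper itself is hypothetical):

	type regMask uint64
	type register uint8

	const noRegister register = 255

	// narrowToDesired returns a mask containing only the first desired
	// register that is still available in m, or m unchanged if none is.
	func narrowToDesired(m regMask, desired []register) regMask {
		for _, r := range desired {
			if r != noRegister && m>>r&1 != 0 {
				return regMask(1) << r
			}
		}
		return m
	}
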
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index d9c3e455a0..ab6d020942 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -395,7 +395,8 @@ func canMergeLoad(target, load *Value) bool {
// isSameCall reports whether sym is the same as the given named symbol
func isSameCall(sym interface{}, name string) bool {
- return sym.(*AuxCall).Fn.String() == name
+ fn := sym.(*AuxCall).Fn
+ return fn != nil && fn.String() == name
}
// nlz returns the number of leading zeros.
@@ -448,10 +449,7 @@ func log2uint32(n int64) int64 {
return int64(bits.Len32(uint32(n))) - 1
}
-// isPowerOfTwo reports whether n is a power of 2.
-func isPowerOfTwo(n int64) bool {
- return n > 0 && n&(n-1) == 0
-}
+// The isPowerOfTwo functions report whether n is a power of 2.
func isPowerOfTwo8(n int8) bool {
return n > 0 && n&(n-1) == 0
}
@@ -764,6 +762,36 @@ func devirt(v *Value, aux interface{}, sym Sym, offset int64) *AuxCall {
return StaticAuxCall(lsym, va.args, va.results)
}
+// de-virtualize an InterLECall
+// 'sym' is the symbol for the itab
+func devirtLESym(v *Value, aux interface{}, sym Sym, offset int64) *obj.LSym {
+ n, ok := sym.(*obj.LSym)
+ if !ok {
+ return nil
+ }
+
+ f := v.Block.Func
+ lsym := f.fe.DerefItab(n, offset)
+ if f.pass.debug > 0 {
+ if lsym != nil {
+ f.Warnl(v.Pos, "de-virtualizing call")
+ } else {
+ f.Warnl(v.Pos, "couldn't de-virtualize call")
+ }
+ }
+ if lsym == nil {
+ return nil
+ }
+ return lsym
+}
+
+func devirtLECall(v *Value, sym *obj.LSym) *Value {
+ v.Op = OpStaticLECall
+ v.Aux.(*AuxCall).Fn = sym
+ v.RemoveArg(0)
+ return v
+}
+
// isSamePtr reports whether p1 and p2 point to the same address.
func isSamePtr(p1, p2 *Value) bool {
if p1 == p2 {
@@ -1350,8 +1378,73 @@ func GetPPC64Shiftme(auxint int64) int64 {
return int64(int8(auxint))
}
-// Catch the simple ones first
-// TODO: Later catch more cases
+// Test if this value can be encoded as a mask for an rlwinm-like
+// operation. Masks may also extend from the msb and wrap around to
+// the lsb. That is, the valid masks are 32-bit strings of the form
+// 0..01..10..0, 1..10..01..1, or 1...1.
+func isPPC64WordRotateMask(v64 int64) bool {
+ // Isolate rightmost 1 (if none 0) and add.
+ v := uint32(v64)
+ vp := (v & -v) + v
+ // Likewise, for the wrapping case.
+ vn := ^v
+ vpn := (vn & -vn) + vn
+ return (v&vp == 0 || vn&vpn == 0) && v != 0
+}
+
+// Compress the mask and shift into a single value of the form
+// me | mb<<8 | rotate<<16 | nbits<<24, where me and mb can
+// be used to regenerate the input mask.
+func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {
+ var mb, me, mbn, men int
+
+ // Determine boundaries and then decode them
+ if mask == 0 || ^mask == 0 || rotate >= nbits {
+ panic("Invalid PPC64 rotate mask")
+ } else if nbits == 32 {
+ mb = bits.LeadingZeros32(uint32(mask))
+ me = 32 - bits.TrailingZeros32(uint32(mask))
+ mbn = bits.LeadingZeros32(^uint32(mask))
+ men = 32 - bits.TrailingZeros32(^uint32(mask))
+ } else {
+ mb = bits.LeadingZeros64(uint64(mask))
+ me = 64 - bits.TrailingZeros64(uint64(mask))
+ mbn = bits.LeadingZeros64(^uint64(mask))
+ men = 64 - bits.TrailingZeros64(^uint64(mask))
+ }
+	// Check for a wrapping mask (e.g. bits at 0 and 63)
+ if mb == 0 && me == int(nbits) {
+ // swap the inverted values
+ mb, me = men, mbn
+ }
+
+ return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
+}
+
+// The inverse operation of encodePPC64RotateMask. The values returned as
+// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
+func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
+ auxint := uint64(sauxint)
+ rotate = int64((auxint >> 16) & 0xFF)
+ mb = int64((auxint >> 8) & 0xFF)
+ me = int64((auxint >> 0) & 0xFF)
+ nbits := int64((auxint >> 24) & 0xFF)
+ mask = ((1 << uint(nbits-mb)) - 1) ^ ((1 << uint(nbits-me)) - 1)
+ if mb > me {
+ mask = ^mask
+ }
+ if nbits == 32 {
+ mask = uint64(uint32(mask))
+ }
+
+	// Fix up ME to match the ISA definition. The second argument to MASK(..,me)
+	// is inclusive.
+ me = (me - 1) & (nbits - 1)
+ return
+}
+
+// This verifies that the mask occupies the
+// rightmost bits.
func isPPC64ValidShiftMask(v int64) bool {
if ((v + 1) & v) == 0 {
return true
@@ -1363,6 +1456,78 @@ func getPPC64ShiftMaskLength(v int64) int64 {
return int64(bits.Len64(uint64(v)))
}
+// Decompose a shift right into an equivalent rotate/mask,
+// and return mask & m.
+func mergePPC64RShiftMask(m, s, nbits int64) int64 {
+ smask := uint64((1<<uint(nbits))-1) >> uint(s)
+ return m & int64(smask)
+}
+
+// Combine (ANDconst [m] (SRWconst [s])) into (RLWINM [y]) or return 0
+func mergePPC64AndSrwi(m, s int64) int64 {
+ mask := mergePPC64RShiftMask(m, s, 32)
+ if !isPPC64WordRotateMask(mask) {
+ return 0
+ }
+ return encodePPC64RotateMask(32-s, mask, 32)
+}
+
+// Test if a shift right feeding into a CLRLSLDI can be merged into RLWINM.
+// Return the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
+ mask_1 := uint64(0xFFFFFFFF >> uint(srw))
+	// For CLRLSLDI, it's more convenient to think of it as a mask of the left bits, then a rotate left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // Rewrite mask to apply after the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
+
+ r_1 := 32 - srw
+ r_2 := GetPPC64Shiftsh(sld)
+ r_3 := (r_1 + r_2) & 31 // This can wrap.
+
+ if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 {
+ return 0
+ }
+ return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32)
+}
+
+// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return
+// the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {
+ r_1, _, _, mask_1 := DecodePPC64RotateMask(rlw)
+	// For CLRLSLDI, it's more convenient to think of it as a mask of the left bits, then a rotate left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // combine the masks, and adjust for the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(int64(sld)))
+ r_2 := GetPPC64Shiftsh(int64(sld))
+ r_3 := (r_1 + r_2) & 31 // This can wrap.
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if !isPPC64WordRotateMask(int64(mask_3)) || uint64(uint32(mask_3)) != mask_3 {
+ return 0
+ }
+ return encodePPC64RotateMask(r_3, int64(mask_3), 32)
+}
+
+// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)),
+// or return 0 if they cannot be combined.
+func mergePPC64SldiSrw(sld, srw int64) int64 {
+ if sld > srw || srw >= 32 {
+ return 0
+ }
+ mask_r := uint32(0xFFFFFFFF) >> uint(srw)
+ mask_l := uint32(0xFFFFFFFF) >> uint(sld)
+ mask := (mask_r & mask_l) << uint(sld)
+ return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32)
+}
+
+// Convenience function to rotate a 32-bit constant value by another constant.
+func rotateLeft32(v, rotate int64) int64 {
+ return int64(bits.RotateLeft32(uint32(v), int(rotate)))
+}
+
// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
func armBFAuxInt(lsb, width int64) arm64BitField {
if lsb < 0 || lsb > 63 {
@@ -1387,7 +1552,7 @@ func (bfc arm64BitField) getARM64BFwidth() int64 {
// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
func isARM64BFMask(lsb, mask, rshift int64) bool {
shiftedMask := int64(uint64(mask) >> uint64(rshift))
- return shiftedMask != 0 && isPowerOfTwo(shiftedMask+1) && nto(shiftedMask)+lsb < 64
+ return shiftedMask != 0 && isPowerOfTwo64(shiftedMask+1) && nto(shiftedMask)+lsb < 64
}
// returns the bitfield width of mask >> rshift for arm64 bitfield ops
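
A worked round trip through the new PPC64 helpers may make the encoding concrete: for mask 0xF0, rotate 0, nbits 32, encodePPC64RotateMask computes mb = 24 and me = 28 (before the inclusive fixup), packing 28 | 24<<8 | 0<<16 | 32<<24 = 0x2000181C; DecodePPC64RotateMask then recovers rotate = 0, mb = 24, me = 27 (me made inclusive), and mask = 0xF0. A test-style sketch assuming the two functions added above, living in the ssa package with fmt imported:

	func ExamplePPC64RotateMaskRoundTrip() {
		enc := encodePPC64RotateMask(0, 0xF0, 32)
		rotate, mb, me, mask := DecodePPC64RotateMask(enc)
		fmt.Printf("enc=%#x rotate=%d mb=%d me=%d mask=%#x\n", enc, rotate, mb, me, mask)
		// Output: enc=0x2000181c rotate=0 mb=24 me=27 mask=0xf0
	}
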
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index fc1e0541b2..afce14fa76 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -1310,10 +1310,8 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool {
func rewriteValue386_Op386ADDSD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
- // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (ADDSDload x [off] {sym} ptr mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1326,7 +1324,7 @@ func rewriteValue386_Op386ADDSD(v *Value) bool {
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
- if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386ADDSDload)
@@ -1395,10 +1393,8 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool {
func rewriteValue386_Op386ADDSS(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
- // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (ADDSSload x [off] {sym} ptr mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1411,7 +1407,7 @@ func rewriteValue386_Op386ADDSS(v *Value) bool {
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
- if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386ADDSSload)
@@ -2640,10 +2636,8 @@ func rewriteValue386_Op386CMPWload(v *Value) bool {
func rewriteValue386_Op386DIVSD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
- // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (DIVSDload x [off] {sym} ptr mem)
for {
x := v_0
@@ -2655,7 +2649,7 @@ func rewriteValue386_Op386DIVSD(v *Value) bool {
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
- if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(Op386DIVSDload)
@@ -2722,10 +2716,8 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool {
func rewriteValue386_Op386DIVSS(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
- // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (DIVSSload x [off] {sym} ptr mem)
for {
x := v_0
@@ -2737,7 +2729,7 @@ func rewriteValue386_Op386DIVSS(v *Value) bool {
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
- if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(Op386DIVSSload)
@@ -6104,10 +6096,8 @@ func rewriteValue386_Op386MULLload(v *Value) bool {
func rewriteValue386_Op386MULSD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
- // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (MULSDload x [off] {sym} ptr mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -6120,7 +6110,7 @@ func rewriteValue386_Op386MULSD(v *Value) bool {
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
- if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386MULSDload)
@@ -6189,10 +6179,8 @@ func rewriteValue386_Op386MULSDload(v *Value) bool {
func rewriteValue386_Op386MULSS(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
- // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (MULSSload x [off] {sym} ptr mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -6205,7 +6193,7 @@ func rewriteValue386_Op386MULSS(v *Value) bool {
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
- if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
continue
}
v.reset(Op386MULSSload)
@@ -8187,10 +8175,8 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool {
func rewriteValue386_Op386SUBSD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
- // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (SUBSDload x [off] {sym} ptr mem)
for {
x := v_0
@@ -8202,7 +8188,7 @@ func rewriteValue386_Op386SUBSD(v *Value) bool {
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
- if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(Op386SUBSDload)
@@ -8269,10 +8255,8 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool {
func rewriteValue386_Op386SUBSS(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- b := v.Block
- config := b.Func.Config
// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
- // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (SUBSSload x [off] {sym} ptr mem)
for {
x := v_0
@@ -8284,7 +8268,7 @@ func rewriteValue386_Op386SUBSS(v *Value) bool {
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
- if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) {
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
break
}
v.reset(Op386SUBSSload)
@@ -8712,11 +8696,11 @@ func rewriteValue386_OpConst8(v *Value) bool {
}
func rewriteValue386_OpConstBool(v *Value) bool {
// match: (ConstBool [c])
- // result: (MOVLconst [int32(b2i(c))])
+ // result: (MOVLconst [b2i32(c)])
for {
c := auxIntToBool(v.AuxInt)
v.reset(Op386MOVLconst)
- v.AuxInt = int32ToAuxInt(int32(b2i(c)))
+ v.AuxInt = int32ToAuxInt(b2i32(c))
return true
}
}
@@ -10043,68 +10027,32 @@ func rewriteValue386_OpMove(v *Value) bool {
func rewriteValue386_OpNeg32F(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Neg32F x)
- // cond: !config.use387
// result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
for {
x := v_0
- if !(!config.use387) {
- break
- }
v.reset(Op386PXOR)
v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32)
v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
v.AddArg2(x, v0)
return true
}
- // match: (Neg32F x)
- // cond: config.use387
- // result: (FCHS x)
- for {
- x := v_0
- if !(config.use387) {
- break
- }
- v.reset(Op386FCHS)
- v.AddArg(x)
- return true
- }
- return false
}
func rewriteValue386_OpNeg64F(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Neg64F x)
- // cond: !config.use387
// result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
for {
x := v_0
- if !(!config.use387) {
- break
- }
v.reset(Op386PXOR)
v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64)
v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
v.AddArg2(x, v0)
return true
}
- // match: (Neg64F x)
- // cond: config.use387
- // result: (FCHS x)
- for {
- x := v_0
- if !(config.use387) {
- break
- }
- v.reset(Op386FCHS)
- v.AddArg(x)
- return true
- }
- return false
}
func rewriteValue386_OpNeq16(v *Value) bool {
v_1 := v.Args[1]
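The ConstBool rules in this file now emit b2i32(c) directly instead of int32(b2i(c)). The helper itself is not shown in these hunks; presumably it is the int32-typed counterpart of b2i, along these lines:

	// Presumed definition (the real one lives in rewrite.go, not in this
	// diff): bool -> int32, so the rule avoids the intermediate int64.
	func b2i32(b bool) int32 {
		if b {
			return 1
		}
		return 0
	}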
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 3d7eb8c9a4..ed84812a03 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -572,6 +572,8 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAtomicAdd32(v)
case OpAtomicAdd64:
return rewriteValueAMD64_OpAtomicAdd64(v)
+ case OpAtomicAnd32:
+ return rewriteValueAMD64_OpAtomicAnd32(v)
case OpAtomicAnd8:
return rewriteValueAMD64_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
@@ -590,6 +592,8 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAtomicLoad8(v)
case OpAtomicLoadPtr:
return rewriteValueAMD64_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ return rewriteValueAMD64_OpAtomicOr32(v)
case OpAtomicOr8:
return rewriteValueAMD64_OpAtomicOr8(v)
case OpAtomicStore32:
@@ -6886,7 +6890,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
return true
}
// match: (CMPBconst (ANDLconst _ [m]) [n])
- // cond: 0 <= m && int8(m) < n
+ // cond: 0 <= int8(m) && int8(m) < n
// result: (FlagLT_ULT)
for {
n := auxIntToInt8(v.AuxInt)
@@ -6894,7 +6898,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
break
}
m := auxIntToInt32(v_0.AuxInt)
- if !(0 <= m && int8(m) < n) {
+ if !(0 <= int8(m) && int8(m) < n) {
break
}
v.reset(OpAMD64FlagLT_ULT)
@@ -8243,7 +8247,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
return true
}
// match: (CMPWconst (ANDLconst _ [m]) [n])
- // cond: 0 <= m && int16(m) < n
+ // cond: 0 <= int16(m) && int16(m) < n
// result: (FlagLT_ULT)
for {
n := auxIntToInt16(v.AuxInt)
@@ -8251,7 +8255,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
break
}
m := auxIntToInt32(v_0.AuxInt)
- if !(0 <= m && int16(m) < n) {
+ if !(0 <= int16(m) && int16(m) < n) {
break
}
v.reset(OpAMD64FlagLT_ULT)
@@ -15830,12 +15834,12 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
return true
}
// match: (MULLconst [c] x)
- // cond: isPowerOfTwo(int64(c)+1) && c >= 15
+ // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
// result: (SUBL (SHLLconst <v.Type> [int8(log2(int64(c)+1))] x) x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(isPowerOfTwo(int64(c)+1) && c >= 15) {
+ if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
break
}
v.reset(OpAMD64SUBL)
@@ -16277,12 +16281,12 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
return true
}
// match: (MULQconst [c] x)
- // cond: isPowerOfTwo(int64(c)+1) && c >= 15
+ // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
// result: (SUBQ (SHLQconst <v.Type> [int8(log2(int64(c)+1))] x) x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(isPowerOfTwo(int64(c)+1) && c >= 15) {
+ if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
break
}
v.reset(OpAMD64SUBQ)
@@ -28476,6 +28480,21 @@ func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
return true
}
}
+func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicAnd32 ptr val mem)
+ // result: (ANDLlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ANDLlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -28607,6 +28626,21 @@ func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
return true
}
}
+func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicOr32 ptr val mem)
+ // result: (ORLlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ORLlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -29685,11 +29719,11 @@ func rewriteValueAMD64_OpConst8(v *Value) bool {
}
func rewriteValueAMD64_OpConstBool(v *Value) bool {
// match: (ConstBool [c])
- // result: (MOVLconst [int32(b2i(c))])
+ // result: (MOVLconst [b2i32(c)])
for {
c := auxIntToBool(v.AuxInt)
v.reset(OpAMD64MOVLconst)
- v.AuxInt = int32ToAuxInt(int32(b2i(c)))
+ v.AuxInt = int32ToAuxInt(b2i32(c))
return true
}
}
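The new AtomicAnd32/AtomicOr32 lowerings are one-to-one: each becomes a single LOCK-prefixed ANDL/ORL. As a rough illustration — assuming these ops back 32-bit And/Or intrinsics in the runtime's internal atomic package, which is not importable from user code — a caller would look like:

	package runtime // sketch only: runtime/internal/atomic is runtime-internal

	import "runtime/internal/atomic"

	// Hypothetical caller: each line should lower to one locked
	// read-modify-write on amd64 via the rules above.
	func clearAndSet(flags *uint32, clear, set uint32) {
		atomic.And(flags, ^clear) // AtomicAnd32 -> ANDLlock
		atomic.Or(flags, set)     // AtomicOr32  -> ORLlock
	}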
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 4e44165169..6ade8283d6 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -448,8 +448,7 @@ func rewriteValueARM(v *Value) bool {
v.Op = OpARMADD
return true
case OpAddr:
- v.Op = OpARMMOVWaddr
- return true
+ return rewriteValueARM_OpAddr(v)
case OpAnd16:
v.Op = OpARMAND
return true
@@ -481,23 +480,17 @@ func rewriteValueARM(v *Value) bool {
v.Op = OpARMMVN
return true
case OpConst16:
- v.Op = OpARMMOVWconst
- return true
+ return rewriteValueARM_OpConst16(v)
case OpConst32:
- v.Op = OpARMMOVWconst
- return true
+ return rewriteValueARM_OpConst32(v)
case OpConst32F:
- v.Op = OpARMMOVFconst
- return true
+ return rewriteValueARM_OpConst32F(v)
case OpConst64F:
- v.Op = OpARMMOVDconst
- return true
+ return rewriteValueARM_OpConst64F(v)
case OpConst8:
- v.Op = OpARMMOVWconst
- return true
+ return rewriteValueARM_OpConst8(v)
case OpConstBool:
- v.Op = OpARMMOVWconst
- return true
+ return rewriteValueARM_OpConstBool(v)
case OpConstNil:
return rewriteValueARM_OpConstNil(v)
case OpCtz16:
@@ -3362,21 +3355,6 @@ func rewriteValueARM_OpARMCMN(v *Value) bool {
}
break
}
- // match: (CMN x (RSBconst [0] y))
- // result: (CMP x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpARMRSBconst || auxIntToInt32(v_1.AuxInt) != 0 {
- continue
- }
- y := v_1.Args[0]
- v.reset(OpARMCMP)
- v.AddArg2(x, y)
- return true
- }
- break
- }
return false
}
func rewriteValueARM_OpARMCMNconst(v *Value) bool {
@@ -3938,18 +3916,6 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
v.AddArg(v0)
return true
}
- // match: (CMP x (RSBconst [0] y))
- // result: (CMN x y)
- for {
- x := v_0
- if v_1.Op != OpARMRSBconst || auxIntToInt32(v_1.AuxInt) != 0 {
- break
- }
- y := v_1.Args[0]
- v.reset(OpARMCMN)
- v.AddArg2(x, y)
- return true
- }
return false
}
func rewriteValueARM_OpARMCMPD(v *Value) bool {
@@ -4627,15 +4593,15 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
}
// match: (MOVBUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVWconst [int64(read8(sym, off))])
+ // result: (MOVWconst [int32(read8(sym, int64(off)))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
- v.AuxInt = int64(read8(sym, off))
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
return true
}
return false
@@ -5540,15 +5506,15 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
}
// match: (MOVHUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVWconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
- v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
@@ -6261,15 +6227,15 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool {
}
// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVWconst [int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))])
+ // result: (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARMMOVWconst)
- v.AuxInt = int64(int32(read32(sym, off, config.ctxt.Arch.ByteOrder)))
+ v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
@@ -6439,17 +6405,17 @@ func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool {
return true
}
// match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
- // result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
+ // result: (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
for {
- d := v.AuxInt
+ d := auxIntToInt32(v.AuxInt)
ptr := v_0
if v_1.Op != OpARMMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
v.reset(OpARMMOVWload)
- v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
v.AddArg2(ptr, mem)
return true
}
@@ -6519,17 +6485,17 @@ func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool {
return true
}
// match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
- // result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+ // result: (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
for {
- d := v.AuxInt
+ d := auxIntToInt32(v.AuxInt)
ptr := v_0
if v_1.Op != OpARMMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
v.reset(OpARMMOVWload)
- v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
v.AddArg2(ptr, mem)
return true
}
@@ -6860,18 +6826,18 @@ func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
- // result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
+ // result: (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
for {
- d := v.AuxInt
+ d := auxIntToInt32(v.AuxInt)
ptr := v_0
if v_1.Op != OpARMMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
val := v_2
mem := v_3
v.reset(OpARMMOVWstore)
- v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
v.AddArg3(ptr, val, mem)
return true
}
@@ -6906,18 +6872,18 @@ func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
- // result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+ // result: (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
for {
- d := v.AuxInt
+ d := auxIntToInt32(v.AuxInt)
ptr := v_0
if v_1.Op != OpARMMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
val := v_2
mem := v_3
v.reset(OpARMMOVWstore)
- v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
v.AddArg3(ptr, val, mem)
return true
}
@@ -8132,15 +8098,15 @@ func rewriteValueARM_OpARMMVNshiftLLreg(v *Value) bool {
func rewriteValueARM_OpARMMVNshiftRA(v *Value) bool {
v_0 := v.Args[0]
// match: (MVNshiftRA (MOVWconst [c]) [d])
- // result: (MOVWconst [^int64(int32(c)>>uint64(d))])
+ // result: (MOVWconst [int32(c)>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt32(v.AuxInt)
if v_0.Op != OpARMMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpARMMOVWconst)
- v.AuxInt = ^int64(int32(c) >> uint64(d))
+ v.AuxInt = int32ToAuxInt(int32(c) >> uint64(d))
return true
}
return false
@@ -8166,15 +8132,15 @@ func rewriteValueARM_OpARMMVNshiftRAreg(v *Value) bool {
func rewriteValueARM_OpARMMVNshiftRL(v *Value) bool {
v_0 := v.Args[0]
// match: (MVNshiftRL (MOVWconst [c]) [d])
- // result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
+ // result: (MOVWconst [^int32(uint32(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt32(v.AuxInt)
if v_0.Op != OpARMMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpARMMOVWconst)
- v.AuxInt = ^int64(uint32(c) >> uint64(d))
+ v.AuxInt = int32ToAuxInt(^int32(uint32(c) >> uint64(d)))
return true
}
return false
@@ -12900,6 +12866,19 @@ func rewriteValueARM_OpARMXORshiftRR(v *Value) bool {
}
return false
}
+func rewriteValueARM_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARMMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
func rewriteValueARM_OpAvg32u(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -12981,6 +12960,66 @@ func rewriteValueARM_OpBswap32(v *Value) bool {
}
return false
}
+func rewriteValueARM_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (MOVFconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpARMMOVFconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (MOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpARMMOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVWconst [b2i32(b)])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(b))
+ return true
+ }
+}
func rewriteValueARM_OpConstNil(v *Value) bool {
// match: (ConstNil)
// result: (MOVWconst [0])
@@ -14675,25 +14714,25 @@ func rewriteValueARM_OpNot(v *Value) bool {
func rewriteValueARM_OpOffPtr(v *Value) bool {
v_0 := v.Args[0]
// match: (OffPtr [off] ptr:(SP))
- // result: (MOVWaddr [off] ptr)
+ // result: (MOVWaddr [int32(off)] ptr)
for {
- off := v.AuxInt
+ off := auxIntToInt64(v.AuxInt)
ptr := v_0
if ptr.Op != OpSP {
break
}
v.reset(OpARMMOVWaddr)
- v.AuxInt = off
+ v.AuxInt = int32ToAuxInt(int32(off))
v.AddArg(ptr)
return true
}
// match: (OffPtr [off] ptr)
- // result: (ADDconst [off] ptr)
+ // result: (ADDconst [int32(off)] ptr)
for {
- off := v.AuxInt
+ off := auxIntToInt64(v.AuxInt)
ptr := v_0
v.reset(OpARMADDconst)
- v.AuxInt = off
+ v.AuxInt = int32ToAuxInt(int32(off))
v.AddArg(ptr)
return true
}
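Most of the churn in this file is the typed-AuxInt migration: raw v.AuxInt reads and writes become width-checked accessor calls. The accessors themselves live in rewrite.go rather than in this diff; presumably they are thin conversions of this shape:

	// Presumed shape of the typed-AuxInt helpers used above; the point is
	// that each rule now states the width it expects instead of passing
	// raw int64s around.
	func auxIntToInt32(i int64) int32 { return int32(i) }
	func int32ToAuxInt(i int32) int64 { return int64(i) }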
@@ -16002,6 +16041,42 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMEQ, cmp)
return true
}
+ // match: (EQ (CMP x (RSBconst [0] y)))
+ // result: (EQ (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMN x (RSBconst [0] y)))
+ // result: (EQ (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
// match: (EQ (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (EQ (CMP x y) yes no)
@@ -16848,6 +16923,42 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMLE, cmp)
return true
}
+ // match: (GE (CMP x (RSBconst [0] y)))
+ // result: (GE (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGE, v0)
+ return true
+ }
+ // match: (GE (CMN x (RSBconst [0] y)))
+ // result: (GE (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGE, v0)
+ return true
+ }
+ break
+ }
// match: (GE (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (GEnoov (CMP x y) yes no)
@@ -17728,6 +17839,42 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMLT, cmp)
return true
}
+ // match: (GT (CMP x (RSBconst [0] y)))
+ // result: (GT (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGT, v0)
+ return true
+ }
+ // match: (GT (CMN x (RSBconst [0] y)))
+ // result: (GT (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGT, v0)
+ return true
+ }
+ break
+ }
// match: (GT (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (GTnoov (CMP x y) yes no)
@@ -18699,6 +18846,42 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMGE, cmp)
return true
}
+ // match: (LE (CMP x (RSBconst [0] y)))
+ // result: (LE (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLE, v0)
+ return true
+ }
+ // match: (LE (CMN x (RSBconst [0] y)))
+ // result: (LE (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLE, v0)
+ return true
+ }
+ break
+ }
// match: (LE (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (LEnoov (CMP x y) yes no)
@@ -19579,6 +19762,42 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMGT, cmp)
return true
}
+ // match: (LT (CMP x (RSBconst [0] y)))
+ // result: (LT (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLT, v0)
+ return true
+ }
+ // match: (LT (CMN x (RSBconst [0] y)))
+ // result: (LT (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLT, v0)
+ return true
+ }
+ break
+ }
// match: (LT (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (LTnoov (CMP x y) yes no)
@@ -20609,6 +20828,42 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMNE, cmp)
return true
}
+ // match: (NE (CMP x (RSBconst [0] y)))
+ // result: (NE (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMN x (RSBconst [0] y)))
+ // result: (NE (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
// match: (NE (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (NE (CMP x y) yes no)
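The CMP x (RSBconst [0] y) <-> CMN x y swaps dropped from the value rewrites reappear above as block rewrites, emitted only under EQ, NE, GE, GT, LE and LT; the unsigned branch kinds are absent, presumably because the carry flag of ADDS and SUBS differs even though the sum itself is the same. The underlying identity is plain two's-complement arithmetic, sketched here as a quick check:

	package main

	import "fmt"

	// x - (0 - y) and x + y are the same 32-bit sum, so they produce the
	// same N and Z flags; verify the identity on a few values.
	func main() {
		for _, p := range [][2]int32{{5, 3}, {-7, 2}, {0, -9}, {1 << 30, 1 << 30}} {
			x, y := p[0], p[1]
			if x-(0-y) != x+y {
				panic("identity broken")
			}
		}
		fmt.Println("ok")
	}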
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 6c48812121..774e2ead68 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -424,6 +424,8 @@ func rewriteValueARM64(v *Value) bool {
case OpAtomicAdd64Variant:
v.Op = OpARM64LoweredAtomicAdd64Variant
return true
+ case OpAtomicAnd32:
+ return rewriteValueARM64_OpAtomicAnd32(v)
case OpAtomicAnd8:
return rewriteValueARM64_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
@@ -450,6 +452,8 @@ func rewriteValueARM64(v *Value) bool {
case OpAtomicLoadPtr:
v.Op = OpARM64LDAR
return true
+ case OpAtomicOr32:
+ return rewriteValueARM64_OpAtomicOr32(v)
case OpAtomicOr8:
return rewriteValueARM64_OpAtomicOr8(v)
case OpAtomicStore32:
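The long run of isPowerOfTwo -> isPowerOfTwo64 renames below is mechanical (the predicate was renamed to make its 64-bit operand explicit), but the rules it guards are the usual multiply-by-constant strength reductions. The first MADD rule, for instance, is this identity in Go terms:

	// MADD a x c computes a + x*c; for c a power of two the rule folds the
	// multiply into the add's shifted operand: a + x<<log2(c).
	func madd8(a, x int64) int64 {
		return a + x*8 // one ADD a, x, LSL #3 on arm64 via (ADDshiftLL a x [3])
	}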
@@ -4888,7 +4892,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (ADDshiftLL a x [log2(c)])
for {
a := v_0
@@ -4897,7 +4901,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -4906,7 +4910,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c-1) && c>=3
+ // cond: isPowerOfTwo64(c-1) && c>=3
// result: (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
a := v_0
@@ -4915,7 +4919,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c-1) && c >= 3) {
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
break
}
v.reset(OpARM64ADD)
@@ -4926,7 +4930,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c+1) && c>=7
+ // cond: isPowerOfTwo64(c+1) && c>=7
// result: (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
for {
a := v_0
@@ -4935,7 +4939,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c+1) && c >= 7) {
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
break
}
v.reset(OpARM64SUB)
@@ -4946,7 +4950,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a x (MOVDconst [c]))
- // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
for {
a := v_0
@@ -4955,7 +4959,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -4967,7 +4971,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a x (MOVDconst [c]))
- // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
for {
a := v_0
@@ -4976,7 +4980,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -4988,7 +4992,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a x (MOVDconst [c]))
- // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
for {
a := v_0
@@ -4997,7 +5001,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -5009,7 +5013,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a x (MOVDconst [c]))
- // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
for {
a := v_0
@@ -5018,7 +5022,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5064,7 +5068,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (ADDshiftLL a x [log2(c)])
for {
a := v_0
@@ -5073,7 +5077,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5082,7 +5086,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c-1) && c>=3
+ // cond: isPowerOfTwo64(c-1) && c>=3
// result: (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
a := v_0
@@ -5091,7 +5095,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c-1) && c >= 3) {
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
break
}
v.reset(OpARM64ADD)
@@ -5102,7 +5106,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c+1) && c>=7
+ // cond: isPowerOfTwo64(c+1) && c>=7
// result: (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
for {
a := v_0
@@ -5111,7 +5115,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c+1) && c >= 7) {
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
break
}
v.reset(OpARM64SUB)
@@ -5122,7 +5126,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a (MOVDconst [c]) x)
- // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
for {
a := v_0
@@ -5131,7 +5135,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -5143,7 +5147,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a (MOVDconst [c]) x)
- // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
for {
a := v_0
@@ -5152,7 +5156,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5164,7 +5168,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a (MOVDconst [c]) x)
- // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
for {
a := v_0
@@ -5173,7 +5177,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -5185,7 +5189,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
return true
}
// match: (MADD a (MOVDconst [c]) x)
- // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
for {
a := v_0
@@ -5194,7 +5198,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5295,7 +5299,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (ADDshiftLL a x [log2(c)])
for {
a := v_0
@@ -5304,7 +5308,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5313,7 +5317,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c-1) && int32(c)>=3
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
// result: (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
a := v_0
@@ -5322,7 +5326,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
break
}
v.reset(OpARM64ADD)
@@ -5333,7 +5337,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c+1) && int32(c)>=7
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
// result: (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
for {
a := v_0
@@ -5342,7 +5346,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
break
}
v.reset(OpARM64SUB)
@@ -5353,7 +5357,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a x (MOVDconst [c]))
- // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
for {
a := v_0
@@ -5362,7 +5366,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -5374,7 +5378,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a x (MOVDconst [c]))
- // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
for {
a := v_0
@@ -5383,7 +5387,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5395,7 +5399,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a x (MOVDconst [c]))
- // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
for {
a := v_0
@@ -5404,7 +5408,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -5416,7 +5420,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a x (MOVDconst [c]))
- // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
for {
a := v_0
@@ -5425,7 +5429,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5486,7 +5490,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (ADDshiftLL a x [log2(c)])
for {
a := v_0
@@ -5495,7 +5499,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5504,7 +5508,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c-1) && int32(c)>=3
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
// result: (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
a := v_0
@@ -5513,7 +5517,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
break
}
v.reset(OpARM64ADD)
@@ -5524,7 +5528,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c+1) && int32(c)>=7
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
// result: (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
for {
a := v_0
@@ -5533,7 +5537,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
break
}
v.reset(OpARM64SUB)
@@ -5544,7 +5548,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a (MOVDconst [c]) x)
- // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
for {
a := v_0
@@ -5553,7 +5557,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -5565,7 +5569,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a (MOVDconst [c]) x)
- // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
for {
a := v_0
@@ -5574,7 +5578,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5586,7 +5590,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a (MOVDconst [c]) x)
- // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
// result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
for {
a := v_0
@@ -5595,7 +5599,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -5607,7 +5611,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
return true
}
// match: (MADDW a (MOVDconst [c]) x)
- // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
// result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
for {
a := v_0
@@ -5616,7 +5620,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -5707,7 +5711,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
break
}
// match: (MNEG x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (NEG (SLLconst <x.Type> [log2(c)] x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5716,7 +5720,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
continue
}
v.reset(OpARM64NEG)
@@ -5729,7 +5733,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
break
}
// match: (MNEG x (MOVDconst [c]))
- // cond: isPowerOfTwo(c-1) && c >= 3
+ // cond: isPowerOfTwo64(c-1) && c >= 3
// result: (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5738,7 +5742,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c-1) && c >= 3) {
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
continue
}
v.reset(OpARM64NEG)
@@ -5751,7 +5755,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
break
}
// match: (MNEG x (MOVDconst [c]))
- // cond: isPowerOfTwo(c+1) && c >= 7
+ // cond: isPowerOfTwo64(c+1) && c >= 7
// result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5760,7 +5764,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c+1) && c >= 7) {
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
continue
}
v.reset(OpARM64NEG)
@@ -5775,7 +5779,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
break
}
// match: (MNEG x (MOVDconst [c]))
- // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
// result: (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5784,7 +5788,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -5799,7 +5803,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
break
}
// match: (MNEG x (MOVDconst [c]))
- // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
// result: (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5808,7 +5812,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
continue
}
v.reset(OpARM64NEG)
@@ -5824,7 +5828,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
break
}
// match: (MNEG x (MOVDconst [c]))
- // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
// result: (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5833,7 +5837,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -5848,7 +5852,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
break
}
// match: (MNEG x (MOVDconst [c]))
- // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
// result: (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5857,7 +5861,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
continue
}
v.reset(OpARM64NEG)
@@ -5952,7 +5956,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
break
}
// match: (MNEGW x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (NEG (SLLconst <x.Type> [log2(c)] x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5961,7 +5965,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
continue
}
v.reset(OpARM64NEG)
@@ -5974,7 +5978,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
break
}
// match: (MNEGW x (MOVDconst [c]))
- // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // cond: isPowerOfTwo64(c-1) && int32(c) >= 3
// result: (NEG (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5983,7 +5987,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
continue
}
v.reset(OpARM64NEG)
@@ -5996,7 +6000,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
break
}
// match: (MNEGW x (MOVDconst [c]))
- // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // cond: isPowerOfTwo64(c+1) && int32(c) >= 7
// result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log2(c+1)]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -6005,7 +6009,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
continue
}
v.reset(OpARM64NEG)
@@ -6020,7 +6024,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
break
}
// match: (MNEGW x (MOVDconst [c]))
- // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
// result: (SLLconst <x.Type> [log2(c/3)] (SUBshiftLL <x.Type> x x [2]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -6029,7 +6033,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -6044,7 +6048,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
break
}
// match: (MNEGW x (MOVDconst [c]))
- // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
// result: (NEG (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -6053,7 +6057,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
continue
}
v.reset(OpARM64NEG)
@@ -6069,7 +6073,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
break
}
// match: (MNEGW x (MOVDconst [c]))
- // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
// result: (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -6078,7 +6082,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -6093,7 +6097,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
break
}
// match: (MNEGW x (MOVDconst [c]))
- // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
// result: (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -6102,7 +6106,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
continue
}
v.reset(OpARM64NEG)
@@ -13376,7 +13380,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SUBshiftLL a x [log2(c)])
for {
a := v_0
@@ -13385,7 +13389,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13394,7 +13398,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c-1) && c>=3
+ // cond: isPowerOfTwo64(c-1) && c>=3
// result: (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
a := v_0
@@ -13403,7 +13407,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c-1) && c >= 3) {
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
break
}
v.reset(OpARM64SUB)
@@ -13414,7 +13418,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c+1) && c>=7
+ // cond: isPowerOfTwo64(c+1) && c>=7
// result: (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
for {
a := v_0
@@ -13423,7 +13427,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c+1) && c >= 7) {
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
break
}
v.reset(OpARM64ADD)
@@ -13434,7 +13438,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a x (MOVDconst [c]))
- // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
for {
a := v_0
@@ -13443,7 +13447,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -13455,7 +13459,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a x (MOVDconst [c]))
- // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
for {
a := v_0
@@ -13464,7 +13468,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13476,7 +13480,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a x (MOVDconst [c]))
- // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
for {
a := v_0
@@ -13485,7 +13489,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -13497,7 +13501,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a x (MOVDconst [c]))
- // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
for {
a := v_0
@@ -13506,7 +13510,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13552,7 +13556,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SUBshiftLL a x [log2(c)])
for {
a := v_0
@@ -13561,7 +13565,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13570,7 +13574,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c-1) && c>=3
+ // cond: isPowerOfTwo64(c-1) && c>=3
// result: (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
a := v_0
@@ -13579,7 +13583,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c-1) && c >= 3) {
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
break
}
v.reset(OpARM64SUB)
@@ -13590,7 +13594,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c+1) && c>=7
+ // cond: isPowerOfTwo64(c+1) && c>=7
// result: (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
for {
a := v_0
@@ -13599,7 +13603,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c+1) && c >= 7) {
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
break
}
v.reset(OpARM64ADD)
@@ -13610,7 +13614,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a (MOVDconst [c]) x)
- // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
for {
a := v_0
@@ -13619,7 +13623,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -13631,7 +13635,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a (MOVDconst [c]) x)
- // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
for {
a := v_0
@@ -13640,7 +13644,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13652,7 +13656,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a (MOVDconst [c]) x)
- // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
for {
a := v_0
@@ -13661,7 +13665,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -13673,7 +13677,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
return true
}
// match: (MSUB a (MOVDconst [c]) x)
- // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
for {
a := v_0
@@ -13682,7 +13686,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
break
}
v.reset(OpARM64SUBshiftLL)
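All of the isPowerOfTwo -> isPowerOfTwo64 churn in these MSUB rules follows from the switch to typed AuxInt fields: the constant c is now always an int64, so the predicate is named for its width. A minimal sketch of the semantics these conditions and the log2 shift amounts rely on (illustrative bodies, not necessarily the compiler's exact source):

package main

import (
	"fmt"
	"math/bits"
)

// isPowerOfTwo64 reports whether n is a positive power of two:
// exactly one bit set, so n&(n-1) clears it to zero.
func isPowerOfTwo64(n int64) bool {
	return n > 0 && n&(n-1) == 0
}

// log2 returns k such that n == 1<<k, for n a positive power of two.
func log2(n int64) int64 {
	return int64(bits.Len64(uint64(n))) - 1
}

func main() {
	// x*8 can become x<<3 because 8 is 2^3.
	fmt.Println(isPowerOfTwo64(8), log2(8)) // true 3
	// 24 = 3*8, so "c%3 == 0 && isPowerOfTwo64(c/3)" fires with log2(24/3) == 3.
	fmt.Println(24%3 == 0, isPowerOfTwo64(24/3), log2(24/3)) // true true 3
}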
@@ -13783,7 +13787,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SUBshiftLL a x [log2(c)])
for {
a := v_0
@@ -13792,7 +13796,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13801,7 +13805,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c-1) && int32(c)>=3
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
// result: (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
a := v_0
@@ -13810,7 +13814,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
break
}
v.reset(OpARM64SUB)
@@ -13821,7 +13825,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a x (MOVDconst [c]))
- // cond: isPowerOfTwo(c+1) && int32(c)>=7
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
// result: (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
for {
a := v_0
@@ -13830,7 +13834,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
break
}
v.reset(OpARM64ADD)
@@ -13841,7 +13845,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a x (MOVDconst [c]))
- // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
for {
a := v_0
@@ -13850,7 +13854,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -13862,7 +13866,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a x (MOVDconst [c]))
- // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
for {
a := v_0
@@ -13871,7 +13875,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13883,7 +13887,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a x (MOVDconst [c]))
- // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
for {
a := v_0
@@ -13892,7 +13896,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -13904,7 +13908,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a x (MOVDconst [c]))
- // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
for {
a := v_0
@@ -13913,7 +13917,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
break
}
c := auxIntToInt64(v_2.AuxInt)
- if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13974,7 +13978,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SUBshiftLL a x [log2(c)])
for {
a := v_0
@@ -13983,7 +13987,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -13992,7 +13996,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c-1) && int32(c)>=3
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
// result: (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
for {
a := v_0
@@ -14001,7 +14005,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
break
}
v.reset(OpARM64SUB)
@@ -14012,7 +14016,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
- // cond: isPowerOfTwo(c+1) && int32(c)>=7
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
// result: (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
for {
a := v_0
@@ -14021,7 +14025,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
break
}
v.reset(OpARM64ADD)
@@ -14032,7 +14036,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
- // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
for {
a := v_0
@@ -14041,7 +14045,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -14053,7 +14057,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
- // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
for {
a := v_0
@@ -14062,7 +14066,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
@@ -14074,7 +14078,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
- // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
for {
a := v_0
@@ -14083,7 +14087,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
break
}
v.reset(OpARM64ADDshiftLL)
@@ -14095,7 +14099,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
return true
}
// match: (MSUBW a (MOVDconst [c]) x)
- // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
for {
a := v_0
@@ -14104,7 +14108,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
}
c := auxIntToInt64(v_1.AuxInt)
x := v_2
- if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
break
}
v.reset(OpARM64SUBshiftLL)
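Each of these MSUB/MSUBW rules encodes a small algebraic identity: MSUB a x c computes a - x*c, and for constants adjacent to a power of two the multiply decomposes into one shift plus one add or subtract. A plain-Go check of the two identities used above (just the arithmetic, not compiler code):

package main

import "fmt"

func main() {
	a, x := int64(1000), int64(7)
	for k := int64(2); k <= 5; k++ {
		c := (int64(1) << k) + 1 // isPowerOfTwo64(c-1), c >= 3
		// (MSUB a x c) => (SUB a (ADDshiftLL x x [k])), i.e. a - (x + x<<k)
		fmt.Println(a-x*c == a-(x+x<<k))
		c = (int64(1) << k) - 1 // isPowerOfTwo64(c+1), c >= 7 once k >= 3
		// (MSUB a x c) => (ADD a (SUBshiftLL x x [k])), i.e. a + (x - x<<k)
		fmt.Println(a-x*c == a+(x-x<<k))
	}
}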
@@ -14210,7 +14214,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
break
}
// match: (MUL x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SLLconst [log2(c)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14219,7 +14223,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14230,7 +14234,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
break
}
// match: (MUL x (MOVDconst [c]))
- // cond: isPowerOfTwo(c-1) && c >= 3
+ // cond: isPowerOfTwo64(c-1) && c >= 3
// result: (ADDshiftLL x x [log2(c-1)])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14239,7 +14243,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c-1) && c >= 3) {
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
continue
}
v.reset(OpARM64ADDshiftLL)
@@ -14250,7 +14254,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
break
}
// match: (MUL x (MOVDconst [c]))
- // cond: isPowerOfTwo(c+1) && c >= 7
+ // cond: isPowerOfTwo64(c+1) && c >= 7
// result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14259,7 +14263,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c+1) && c >= 7) {
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
continue
}
v.reset(OpARM64ADDshiftLL)
@@ -14272,7 +14276,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
break
}
// match: (MUL x (MOVDconst [c]))
- // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
// result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14281,7 +14285,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14295,7 +14299,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
break
}
// match: (MUL x (MOVDconst [c]))
- // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
// result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14304,7 +14308,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14318,7 +14322,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
break
}
// match: (MUL x (MOVDconst [c]))
- // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
// result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14327,7 +14331,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14343,7 +14347,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
break
}
// match: (MUL x (MOVDconst [c]))
- // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
// result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14352,7 +14356,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14460,7 +14464,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
break
}
// match: (MULW x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SLLconst [log2(c)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14469,7 +14473,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14480,7 +14484,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
break
}
// match: (MULW x (MOVDconst [c]))
- // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // cond: isPowerOfTwo64(c-1) && int32(c) >= 3
// result: (ADDshiftLL x x [log2(c-1)])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14489,7 +14493,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
continue
}
v.reset(OpARM64ADDshiftLL)
@@ -14500,7 +14504,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
break
}
// match: (MULW x (MOVDconst [c]))
- // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // cond: isPowerOfTwo64(c+1) && int32(c) >= 7
// result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14509,7 +14513,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
continue
}
v.reset(OpARM64ADDshiftLL)
@@ -14522,7 +14526,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
break
}
// match: (MULW x (MOVDconst [c]))
- // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
// result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14531,7 +14535,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14545,7 +14549,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
break
}
// match: (MULW x (MOVDconst [c]))
- // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
// result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14554,7 +14558,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14568,7 +14572,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
break
}
// match: (MULW x (MOVDconst [c]))
- // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
// result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14577,7 +14581,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -14593,7 +14597,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
break
}
// match: (MULW x (MOVDconst [c]))
- // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
// result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -14602,7 +14606,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
continue
}
v.reset(OpARM64SLLconst)
@@ -20382,7 +20386,7 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool {
return true
}
// match: (UDIV x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SRLconst [log2(c)] x)
for {
x := v_0
@@ -20390,7 +20394,7 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64SRLconst)
@@ -20434,7 +20438,7 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
return true
}
// match: (UDIVW x (MOVDconst [c]))
- // cond: isPowerOfTwo(c) && is32Bit(c)
+ // cond: isPowerOfTwo64(c) && is32Bit(c)
// result: (SRLconst [log2(c)] x)
for {
x := v_0
@@ -20442,7 +20446,7 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c) && is32Bit(c)) {
+ if !(isPowerOfTwo64(c) && is32Bit(c)) {
break
}
v.reset(OpARM64SRLconst)
@@ -20498,7 +20502,7 @@ func rewriteValueARM64_OpARM64UMOD(v *Value) bool {
return true
}
// match: (UMOD x (MOVDconst [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (ANDconst [c-1] x)
for {
x := v_0
@@ -20506,7 +20510,7 @@ func rewriteValueARM64_OpARM64UMOD(v *Value) bool {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpARM64ANDconst)
@@ -20567,7 +20571,7 @@ func rewriteValueARM64_OpARM64UMODW(v *Value) bool {
return true
}
// match: (UMODW x (MOVDconst [c]))
- // cond: isPowerOfTwo(c) && is32Bit(c)
+ // cond: isPowerOfTwo64(c) && is32Bit(c)
// result: (ANDconst [c-1] x)
for {
x := v_0
@@ -20575,7 +20579,7 @@ func rewriteValueARM64_OpARM64UMODW(v *Value) bool {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c) && is32Bit(c)) {
+ if !(isPowerOfTwo64(c) && is32Bit(c)) {
break
}
v.reset(OpARM64ANDconst)
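The UDIV/UDIVW/UMOD/UMODW rules are the classic unsigned strength reductions: division by 2^k is a logical right shift and the remainder is a mask with 2^k-1. Both identities hold only for unsigned operands, which is why the signed DIV/MOD lowerings take a different path. A quick worked check:

package main

import "fmt"

func main() {
	const c = uint64(16) // isPowerOfTwo64(16), log2(16) == 4
	for _, x := range []uint64{0, 1, 15, 16, 17, 1 << 40} {
		fmt.Println(x/c == x>>4)    // (UDIV x (MOVDconst [16])) => (SRLconst [4] x)
		fmt.Println(x%c == x&(c-1)) // (UMOD x (MOVDconst [16])) => (ANDconst [15] x)
	}
}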
@@ -21340,6 +21344,25 @@ func rewriteValueARM64_OpAddr(v *Value) bool {
return true
}
}
+func rewriteValueARM64_OpAtomicAnd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd32 ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd32 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
func rewriteValueARM64_OpAtomicAnd8(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -21359,6 +21382,25 @@ func rewriteValueARM64_OpAtomicAnd8(v *Value) bool {
return true
}
}
+func rewriteValueARM64_OpAtomicOr32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr32 ptr val mem)
+ // result: (Select1 (LoweredAtomicOr32 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
func rewriteValueARM64_OpAtomicOr8(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
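The new AtomicAnd32/AtomicOr32 lowerings follow the same shape as the existing 8-bit versions: LoweredAtomicAnd32 yields a (value, memory) tuple, and since the generic op only produces memory, the rewrite selects the tuple's memory half with Select1. The observable behavior is an atomic read-modify-write; a portable sketch of that contract using sync/atomic (an illustration of the semantics, not of the code the ARM64 backend emits):

package main

import (
	"fmt"
	"sync/atomic"
)

// atomicAnd32 atomically performs *p &= v via a compare-and-swap loop
// and returns the old value (the Select0 half of the lowered tuple).
func atomicAnd32(p *uint32, v uint32) uint32 {
	for {
		old := atomic.LoadUint32(p)
		if atomic.CompareAndSwapUint32(p, old, old&v) {
			return old
		}
	}
}

func main() {
	x := uint32(0b1111)
	old := atomicAnd32(&x, 0b1010)
	fmt.Printf("old=%04b new=%04b\n", old, x) // old=1111 new=1010
}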
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index bdafa9a957..0f0954fb83 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -44,6 +44,9 @@ func rewriteValueMIPS(v *Value) bool {
case OpAtomicAdd32:
v.Op = OpMIPSLoweredAtomicAdd
return true
+ case OpAtomicAnd32:
+ v.Op = OpMIPSLoweredAtomicAnd
+ return true
case OpAtomicAnd8:
return rewriteValueMIPS_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
@@ -61,6 +64,9 @@ func rewriteValueMIPS(v *Value) bool {
case OpAtomicLoadPtr:
v.Op = OpMIPSLoweredAtomicLoad32
return true
+ case OpAtomicOr32:
+ v.Op = OpMIPSLoweredAtomicOr
+ return true
case OpAtomicOr8:
return rewriteValueMIPS_OpAtomicOr8(v)
case OpAtomicStore32:
@@ -862,11 +868,11 @@ func rewriteValueMIPS_OpConst8(v *Value) bool {
}
func rewriteValueMIPS_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
- // result: (MOVWconst [int32(b2i(b))])
+ // result: (MOVWconst [b2i32(b)])
for {
b := auxIntToBool(v.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int32ToAuxInt(int32(b2i(b)))
+ v.AuxInt = int32ToAuxInt(b2i32(b))
return true
}
}
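ConstBool now goes through b2i32 rather than int32(b2i(b)), matching the typed-AuxInt helpers used elsewhere in these files. Presumably b2i32 is just the bool-to-int32 conversion; a sketch of the assumed semantics:

package main

import "fmt"

// b2i32 is assumed to map false/true to 0/1, so (ConstBool [b])
// lowers to (MOVWconst [0]) or (MOVWconst [1]).
func b2i32(b bool) int32 {
	if b {
		return 1
	}
	return 0
}

func main() { fmt.Println(b2i32(false), b2i32(true)) } // 0 1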
@@ -3846,7 +3852,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
break
}
// match: (MUL (MOVWconst [c]) x )
- // cond: isPowerOfTwo(int64(uint32(c)))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
// result: (SLLconst [int32(log2uint32(int64(c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -3855,7 +3861,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
}
c := auxIntToInt32(v_0.AuxInt)
x := v_1
- if !(isPowerOfTwo(int64(uint32(c)))) {
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSLLconst)
@@ -6382,7 +6388,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
break
}
// match: (Select0 (MULTU (MOVWconst [c]) x ))
- // cond: isPowerOfTwo(int64(uint32(c)))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
// result: (SRLconst [int32(32-log2uint32(int64(c)))] x)
for {
if v_0.Op != OpMIPSMULTU {
@@ -6397,7 +6403,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
}
c := auxIntToInt32(v_0_0.AuxInt)
x := v_0_1
- if !(isPowerOfTwo(int64(uint32(c)))) {
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSRLconst)
@@ -6570,7 +6576,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
break
}
// match: (Select1 (MULTU (MOVWconst [c]) x ))
- // cond: isPowerOfTwo(int64(uint32(c)))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
// result: (SLLconst [int32(log2uint32(int64(c)))] x)
for {
if v_0.Op != OpMIPSMULTU {
@@ -6585,7 +6591,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
}
c := auxIntToInt32(v_0_0.AuxInt)
x := v_0_1
- if !(isPowerOfTwo(int64(uint32(c)))) {
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSLLconst)
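The Select0/Select1 rules read the two halves of MULTU's 64-bit product: Select1 is the low word, where multiplying by 2^k is a left shift, and Select0 is the high word, which for c == 2^k is just x shifted down by 32-k. A numeric check of both identities:

package main

import "fmt"

func main() {
	const k = 4 // c == 16, log2uint32(16) == 4
	x := uint32(0xDEADBEEF)
	hi := uint32((uint64(x) << k) >> 32) // Select0 (MULTU (MOVWconst [16]) x)
	lo := uint32(uint64(x) << k)         // Select1 (MULTU (MOVWconst [16]) x)
	fmt.Println(hi == x>>(32-k))         // => (SRLconst [32-k] x), true
	fmt.Println(lo == x<<k)              // => (SLLconst [k] x), true
}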
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
index dfff1c03b7..d0751ee5c3 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -6865,7 +6865,7 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool {
return true
}
// match: (Select0 (DIVVU x (MOVVconst [c])))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (ANDconst [c-1] x)
for {
if v_0.Op != OpMIPS64DIVVU {
@@ -6878,7 +6878,7 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool {
break
}
c := auxIntToInt64(v_0_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpMIPS64ANDconst)
@@ -7012,7 +7012,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
break
}
// match: (Select1 (MULVU x (MOVVconst [c])))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SLLVconst [log2(c)] x)
for {
if v_0.Op != OpMIPS64MULVU {
@@ -7027,7 +7027,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
continue
}
c := auxIntToInt64(v_0_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
continue
}
v.reset(OpMIPS64SLLVconst)
@@ -7053,7 +7053,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
return true
}
// match: (Select1 (DIVVU x (MOVVconst [c])))
- // cond: isPowerOfTwo(c)
+ // cond: isPowerOfTwo64(c)
// result: (SRLVconst [log2(c)] x)
for {
if v_0.Op != OpMIPS64DIVVU {
@@ -7066,7 +7066,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool {
break
}
c := auxIntToInt64(v_0_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isPowerOfTwo64(c)) {
break
}
v.reset(OpMIPS64SRLVconst)
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 29ec3992f2..e5a23e8625 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -59,6 +59,9 @@ func rewriteValuePPC64(v *Value) bool {
case OpAtomicAdd64:
v.Op = OpPPC64LoweredAtomicAdd64
return true
+ case OpAtomicAnd32:
+ v.Op = OpPPC64LoweredAtomicAnd32
+ return true
case OpAtomicAnd8:
v.Op = OpPPC64LoweredAtomicAnd8
return true
@@ -82,8 +85,13 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpAtomicLoad8(v)
case OpAtomicLoadAcq32:
return rewriteValuePPC64_OpAtomicLoadAcq32(v)
+ case OpAtomicLoadAcq64:
+ return rewriteValuePPC64_OpAtomicLoadAcq64(v)
case OpAtomicLoadPtr:
return rewriteValuePPC64_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ v.Op = OpPPC64LoweredAtomicOr32
+ return true
case OpAtomicOr8:
v.Op = OpPPC64LoweredAtomicOr8
return true
@@ -95,6 +103,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpAtomicStore8(v)
case OpAtomicStoreRel32:
return rewriteValuePPC64_OpAtomicStoreRel32(v)
+ case OpAtomicStoreRel64:
+ return rewriteValuePPC64_OpAtomicStoreRel64(v)
case OpAvg64u:
return rewriteValuePPC64_OpAvg64u(v)
case OpBitLen32:
@@ -434,6 +444,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64ANDN(v)
case OpPPC64ANDconst:
return rewriteValuePPC64_OpPPC64ANDconst(v)
+ case OpPPC64CLRLSLDI:
+ return rewriteValuePPC64_OpPPC64CLRLSLDI(v)
case OpPPC64CMP:
return rewriteValuePPC64_OpPPC64CMP(v)
case OpPPC64CMPU:
@@ -568,6 +580,10 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64MOVWstorezero(v)
case OpPPC64MTVSRD:
return rewriteValuePPC64_OpPPC64MTVSRD(v)
+ case OpPPC64MULLD:
+ return rewriteValuePPC64_OpPPC64MULLD(v)
+ case OpPPC64MULLW:
+ return rewriteValuePPC64_OpPPC64MULLW(v)
case OpPPC64NEG:
return rewriteValuePPC64_OpPPC64NEG(v)
case OpPPC64NOR:
@@ -584,6 +600,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64ROTL(v)
case OpPPC64ROTLW:
return rewriteValuePPC64_OpPPC64ROTLW(v)
+ case OpPPC64ROTLWconst:
+ return rewriteValuePPC64_OpPPC64ROTLWconst(v)
case OpPPC64SLD:
return rewriteValuePPC64_OpPPC64SLD(v)
case OpPPC64SLDconst:
@@ -600,6 +618,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64SRD(v)
case OpPPC64SRW:
return rewriteValuePPC64_OpPPC64SRW(v)
+ case OpPPC64SRWconst:
+ return rewriteValuePPC64_OpPPC64SRWconst(v)
case OpPPC64SUB:
return rewriteValuePPC64_OpPPC64SUB(v)
case OpPPC64SUBFCconst:
@@ -926,6 +946,20 @@ func rewriteValuePPC64_OpAtomicLoadAcq32(v *Value) bool {
return true
}
}
+func rewriteValuePPC64_OpAtomicLoadAcq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq64 ptr mem)
+ // result: (LoweredAtomicLoad64 [0] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -1004,6 +1038,22 @@ func rewriteValuePPC64_OpAtomicStoreRel32(v *Value) bool {
return true
}
}
+func rewriteValuePPC64_OpAtomicStoreRel64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel64 ptr val mem)
+ // result: (LoweredAtomicStore64 [0] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
func rewriteValuePPC64_OpAvg64u(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -4168,6 +4218,100 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool {
func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ // match: (AND (MOVDconst [m]) (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64ROTLWconst {
+ continue
+ }
+ r := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64ROTLW {
+ continue
+ }
+ r := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
+ continue
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
// match: (AND x (NOR y y))
// result: (ANDN x y)
for {
@@ -4303,6 +4447,76 @@ func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
v_0 := v.Args[0]
+ // match: (ANDconst [m] (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLWconst {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [m] (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLW {
+ break
+ }
+ r := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
// match: (ANDconst [c] (ANDconst [d] x))
// result: (ANDconst [c&d] x)
for {
@@ -4467,6 +4681,47 @@ func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64CLRLSLDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CLRLSLDI [c] (SRWconst [s] x))
+ // cond: mergePPC64ClrlsldiSrw(int64(c),s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64ClrlsldiSrw(int64(c), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiSrw(int64(c), s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CLRLSLDI [c] i:(RLWINM [s] x))
+ // cond: mergePPC64ClrlsldiRlwinm(c,s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ i := v_0
+ if i.Op != OpPPC64RLWINM {
+ break
+ }
+ s := auxIntToInt64(i.AuxInt)
+ x := i.Args[0]
+ if !(mergePPC64ClrlsldiRlwinm(c, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiRlwinm(c, s))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64CMP(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
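The new AND/ANDconst/CLRLSLDI rules all target RLWINM, PPC64's rotate-left-word-immediate-then-AND-with-mask instruction, which can fold a rotate and a mask into one operation whenever the 32-bit mask is a single contiguous run of ones, possibly wrapping past bit 31; that is the property isPPC64WordRotateMask guards. A sketch of that contiguity test (isWordRotateMask here is a hypothetical stand-in, not the compiler's helper):

package main

import (
	"fmt"
	"math/bits"
)

// isWordRotateMask reports whether the low 32 bits of m form a single
// contiguous run of ones, possibly wrapping past bit 31 -- the only
// masks a lone RLWINM can apply.
func isWordRotateMask(m int64) bool {
	w := uint32(m)
	if w == 0 {
		return false
	}
	oneRun := func(v uint32) bool { return (v&-v+v)&v == 0 }
	return oneRun(w) || oneRun(^w) // a plain run, or a run that wraps
}

func main() {
	fmt.Println(isWordRotateMask(0x0000FF00)) // true: one run
	fmt.Println(isWordRotateMask(0xF000000F)) // true: run wraps bit 31
	fmt.Println(isWordRotateMask(0x00FF00FF)) // false: two runs
	// With a valid mask, (AND (MOVDconst [m]) (ROTLWconst [r] x))
	// really is a single rotate-and-mask:
	x, r := uint32(0x12345678), 8
	fmt.Printf("%#x\n", bits.RotateLeft32(x, r)&0x0000FF00)
}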
@@ -11003,6 +11258,56 @@ func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64MULLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLD x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (MULLDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64MULLDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MULLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLW x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (MULLWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64MULLWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64NEG(v *Value) bool {
v_0 := v.Args[0]
// match: (NEG (ADDconst [c] x))
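MULLD/MULLW only fold a constant operand into the immediate forms when is16Bit(c) holds, since the MULLDconst/MULLWconst immediate is a signed halfword in the encoding. A sketch of the assumed test:

package main

import "fmt"

// is16Bit is assumed to report whether c survives a round trip through
// int16, i.e. -32768 <= c <= 32767.
func is16Bit(c int64) bool {
	return c == int64(int16(c))
}

func main() {
	fmt.Println(is16Bit(32767), is16Bit(-32768), is16Bit(32768)) // true true false
}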
@@ -12756,6 +13061,55 @@ func rewriteValuePPC64_OpPPC64ROTLW(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROTLWconst [r] (AND (MOVDconst [m]) x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ROTLWconst [r] (ANDconst [m] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64SLD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -12776,6 +13130,24 @@ func rewriteValuePPC64_OpPPC64SLD(v *Value) bool {
}
func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
v_0 := v.Args[0]
+ // match: (SLDconst [l] (SRWconst [r] x))
+ // cond: mergePPC64SldiSrw(l,r) != 0
+ // result: (RLWINM [mergePPC64SldiSrw(l,r)] x)
+ for {
+ l := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64SldiSrw(l, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64SldiSrw(l, r))
+ v.AddArg(x)
+ return true
+ }
// match: (SLDconst [c] z:(MOVBZreg x))
// cond: c < 8 && z.Uses == 1
// result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
@@ -12831,7 +13203,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
return true
}
// match: (SLDconst [c] z:(ANDconst [d] x))
- // cond: z.Uses == 1 && isPPC64ValidShiftMask(d)
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))
// result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
for {
c := auxIntToInt64(v.AuxInt)
@@ -12841,7 +13213,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
}
d := auxIntToInt64(z.AuxInt)
x := z.Args[0]
- if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) {
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
break
}
v.reset(OpPPC64CLRLSLDI)
@@ -12850,7 +13222,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
return true
}
// match: (SLDconst [c] z:(AND (MOVDconst [d]) x))
- // cond: z.Uses == 1 && isPPC64ValidShiftMask(d)
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d))
// result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
for {
c := auxIntToInt64(v.AuxInt)
@@ -12867,7 +13239,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
}
d := auxIntToInt64(z_0.AuxInt)
x := z_1
- if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) {
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
continue
}
v.reset(OpPPC64CLRLSLDI)
@@ -12953,26 +13325,8 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (SLWconst [c] z:(MOVWZreg x))
- // cond: z.Uses == 1 && c < 24
- // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,8,31,32)] x)
- for {
- c := auxIntToInt64(v.AuxInt)
- z := v_0
- if z.Op != OpPPC64MOVWZreg {
- break
- }
- x := z.Args[0]
- if !(z.Uses == 1 && c < 24) {
- break
- }
- v.reset(OpPPC64CLRLSLWI)
- v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 8, 31, 32))
- v.AddArg(x)
- return true
- }
// match: (SLWconst [c] z:(ANDconst [d] x))
- // cond: z.Uses == 1 && isPPC64ValidShiftMask(d)
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
// result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
for {
c := auxIntToInt64(v.AuxInt)
@@ -12982,7 +13336,7 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
}
d := auxIntToInt64(z.AuxInt)
x := z.Args[0]
- if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) {
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
break
}
v.reset(OpPPC64CLRLSLWI)
@@ -12991,7 +13345,7 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
return true
}
// match: (SLWconst [c] z:(AND (MOVDconst [d]) x))
- // cond: z.Uses == 1 && isPPC64ValidShiftMask(d)
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
// result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
for {
c := auxIntToInt64(v.AuxInt)
@@ -13008,7 +13362,7 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
}
d := auxIntToInt64(z_0.AuxInt)
x := z_1
- if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) {
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
continue
}
v.reset(OpPPC64CLRLSLWI)
@@ -13110,6 +13464,96 @@ func rewriteValuePPC64_OpPPC64SRW(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRWconst (ANDconst [m] x) [s])
+ // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRWconst (ANDconst [m] x) [s])
+ // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRWconst (AND (MOVDconst [m]) x) [s])
+ // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
+ continue
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (SRWconst (AND (MOVDconst [m]) x) [s])
+ // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64SUB(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
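The SRWconst rules lean on the same RLWINM machinery: a 32-bit logical right shift by s is itself a rotate left by 32-s combined with the mask 0xFFFFFFFF>>s, so a neighboring AND can often be merged into the rotate's mask (mergePPC64AndSrwi) or shown to leave nothing behind (mergePPC64RShiftMask returning 0). The underlying identity, checked over all shift amounts:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint32(0xCAFEBABE)
	for s := 1; s < 32; s++ {
		// srwi x,s == rlwinm with rotate 32-s and mask 0xFFFFFFFF>>s
		if x>>s != bits.RotateLeft32(x, 32-s)&(0xFFFFFFFF>>s) {
			fmt.Println("mismatch at shift", s)
		}
	}
	fmt.Println("ok")
}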
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index c178290343..c337ffbfe7 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -420,8 +420,12 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64AND(v)
case OpRISCV64MOVBUload:
return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
+ case OpRISCV64MOVBUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVBUreg(v)
case OpRISCV64MOVBload:
return rewriteValueRISCV64_OpRISCV64MOVBload(v)
+ case OpRISCV64MOVBreg:
+ return rewriteValueRISCV64_OpRISCV64MOVBreg(v)
case OpRISCV64MOVBstore:
return rewriteValueRISCV64_OpRISCV64MOVBstore(v)
case OpRISCV64MOVBstorezero:
@@ -430,22 +434,32 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64MOVDconst(v)
case OpRISCV64MOVDload:
return rewriteValueRISCV64_OpRISCV64MOVDload(v)
+ case OpRISCV64MOVDreg:
+ return rewriteValueRISCV64_OpRISCV64MOVDreg(v)
case OpRISCV64MOVDstore:
return rewriteValueRISCV64_OpRISCV64MOVDstore(v)
case OpRISCV64MOVDstorezero:
return rewriteValueRISCV64_OpRISCV64MOVDstorezero(v)
case OpRISCV64MOVHUload:
return rewriteValueRISCV64_OpRISCV64MOVHUload(v)
+ case OpRISCV64MOVHUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVHUreg(v)
case OpRISCV64MOVHload:
return rewriteValueRISCV64_OpRISCV64MOVHload(v)
+ case OpRISCV64MOVHreg:
+ return rewriteValueRISCV64_OpRISCV64MOVHreg(v)
case OpRISCV64MOVHstore:
return rewriteValueRISCV64_OpRISCV64MOVHstore(v)
case OpRISCV64MOVHstorezero:
return rewriteValueRISCV64_OpRISCV64MOVHstorezero(v)
case OpRISCV64MOVWUload:
return rewriteValueRISCV64_OpRISCV64MOVWUload(v)
+ case OpRISCV64MOVWUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVWUreg(v)
case OpRISCV64MOVWload:
return rewriteValueRISCV64_OpRISCV64MOVWload(v)
+ case OpRISCV64MOVWreg:
+ return rewriteValueRISCV64_OpRISCV64MOVWreg(v)
case OpRISCV64MOVWstore:
return rewriteValueRISCV64_OpRISCV64MOVWstore(v)
case OpRISCV64MOVWstorezero:
@@ -543,17 +557,23 @@ func rewriteValueRISCV64(v *Value) bool {
case OpRsh8x8:
return rewriteValueRISCV64_OpRsh8x8(v)
case OpSignExt16to32:
- return rewriteValueRISCV64_OpSignExt16to32(v)
+ v.Op = OpRISCV64MOVHreg
+ return true
case OpSignExt16to64:
- return rewriteValueRISCV64_OpSignExt16to64(v)
+ v.Op = OpRISCV64MOVHreg
+ return true
case OpSignExt32to64:
- return rewriteValueRISCV64_OpSignExt32to64(v)
+ v.Op = OpRISCV64MOVWreg
+ return true
case OpSignExt8to16:
- return rewriteValueRISCV64_OpSignExt8to16(v)
+ v.Op = OpRISCV64MOVBreg
+ return true
case OpSignExt8to32:
- return rewriteValueRISCV64_OpSignExt8to32(v)
+ v.Op = OpRISCV64MOVBreg
+ return true
case OpSignExt8to64:
- return rewriteValueRISCV64_OpSignExt8to64(v)
+ v.Op = OpRISCV64MOVBreg
+ return true
case OpSlicemask:
return rewriteValueRISCV64_OpSlicemask(v)
case OpSqrt:
@@ -621,17 +641,23 @@ func rewriteValueRISCV64(v *Value) bool {
case OpZero:
return rewriteValueRISCV64_OpZero(v)
case OpZeroExt16to32:
- return rewriteValueRISCV64_OpZeroExt16to32(v)
+ v.Op = OpRISCV64MOVHUreg
+ return true
case OpZeroExt16to64:
- return rewriteValueRISCV64_OpZeroExt16to64(v)
+ v.Op = OpRISCV64MOVHUreg
+ return true
case OpZeroExt32to64:
- return rewriteValueRISCV64_OpZeroExt32to64(v)
+ v.Op = OpRISCV64MOVWUreg
+ return true
case OpZeroExt8to16:
- return rewriteValueRISCV64_OpZeroExt8to16(v)
+ v.Op = OpRISCV64MOVBUreg
+ return true
case OpZeroExt8to32:
- return rewriteValueRISCV64_OpZeroExt8to32(v)
+ v.Op = OpRISCV64MOVBUreg
+ return true
case OpZeroExt8to64:
- return rewriteValueRISCV64_OpZeroExt8to64(v)
+ v.Op = OpRISCV64MOVBUreg
+ return true
}
return false
}
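SignExt*/ZeroExt* on riscv64 now map directly onto the new MOV*reg/MOV*Ureg pseudo-ops instead of dispatching to per-op rewrite functions; the payoff is that the MOV*reg rules added below can then delete extensions that a load has already performed. The semantics being named are the ordinary Go width conversions, e.g. for halfwords:

package main

import "fmt"

func main() {
	x := uint16(0xFF80)
	fmt.Printf("%#x\n", int64(int16(x)))  // MOVHreg:  sign-extend -> -0x80
	fmt.Printf("%#x\n", int64(uint16(x))) // MOVHUreg: zero-extend -> 0xff80
}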
@@ -844,15 +870,17 @@ func rewriteValueRISCV64_OpEq16(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Eq16 x y)
- // result: (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
+ // result: (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
for {
x := v_0
y := v_1
v.reset(OpRISCV64SEQZ)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
- v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1.AddArg2(x, y)
- v0.AddArg(v1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -895,15 +923,17 @@ func rewriteValueRISCV64_OpEq8(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Eq8 x y)
- // result: (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
+ // result: (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
for {
x := v_0
y := v_1
v.reset(OpRISCV64SEQZ)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
- v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1.AddArg2(x, y)
- v0.AddArg(v1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
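The Eq16/Eq8 (and matching Neq) change moves the zero-extension from the difference onto the operands. The two shapes agree for equality, since the low bits of x-y are zero exactly when the zero-extended operands subtract to zero, but extending the operands lets the extensions cancel against the MOV*Ureg and load rules introduced later in this file. A quick equivalence check:

package main

import "fmt"

func main() {
	for _, p := range [][2]uint16{{1, 1}, {1, 2}, {0xFFFF, 0}, {0x8000, 0x8000}} {
		x, y := p[0], p[1]
		before := uint64(x-y) == 0        // SEQZ (ZeroExt16to64 (SUB x y))
		after := uint64(x)-uint64(y) == 0 // SEQZ (SUB (ZeroExt16to64 x) (ZeroExt16to64 y))
		fmt.Println(before == after, before == (x == y))
	}
}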
@@ -1987,6 +2017,23 @@ func rewriteValueRISCV64_OpMove(v *Value) bool {
return true
}
// match: (Move [s] {t} dst src mem)
+ // cond: s%8 == 0 && s >= 16 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && s >= 16 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpRISCV64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
// cond: (s <= 16 || logLargeCopy(v, s))
// result: (LoweredMove [t.Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src) mem)
for {
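The new riscv64 DUFFCOPY rule enters Duff's device partway through. The conditions pin the shape: the body handles s%8 == 0 bytes in 8-byte steps, up to 8*128 bytes in total, and the AuxInt formula 16*(128 - s/8) implies each 8-byte step costs 16 bytes of code, so shorter copies jump further into the routine. The offset arithmetic, spelled out (duffOffset is just the formula from the rule, not a compiler helper):

package main

import "fmt"

// duffOffset mirrors the AuxInt computation above, assuming each
// 8-byte copy step occupies 16 bytes of instructions.
func duffOffset(s int64) int64 { return 16 * (128 - s/8) }

func main() {
	fmt.Println(duffOffset(8 * 128)) // 0:    copy all 1024 bytes, enter at the top
	fmt.Println(duffOffset(64))      // 1920: skip 120 steps, copy 64 bytes
	fmt.Println(duffOffset(16))      // 2016: skip 126 steps, copy 16 bytes
}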
@@ -2052,15 +2099,17 @@ func rewriteValueRISCV64_OpNeq16(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Neq16 x y)
- // result: (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
+ // result: (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
for {
x := v_0
y := v_1
v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
- v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1.AddArg2(x, y)
- v0.AddArg(v1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -2103,15 +2152,17 @@ func rewriteValueRISCV64_OpNeq8(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Neq8 x y)
- // result: (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
+ // result: (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
for {
x := v_0
y := v_1
v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
- v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1.AddArg2(x, y)
- v0.AddArg(v1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
v.AddArg(v0)
return true
}
@@ -2458,6 +2509,57 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVBUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -2506,6 +2608,57 @@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -2571,6 +2724,108 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool {
v.AddArg2(ptr, mem)
return true
}
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
@@ -2714,6 +2969,22 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64MOVDreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDreg x)
+ // cond: x.Uses == 1
+ // result: (MOVDnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64MOVDnop)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -2877,6 +3148,79 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVHUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -2925,6 +3269,101 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -2990,6 +3429,74 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool {
v.AddArg2(ptr, mem)
return true
}
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
@@ -3088,6 +3595,101 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVWUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -3136,6 +3738,134 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVWUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVWload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -3201,6 +3931,40 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
v.AddArg2(ptr, mem)
return true
}
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
@@ -4719,103 +5483,6 @@ func rewriteValueRISCV64_OpRsh8x8(v *Value) bool {
return true
}
}
-func rewriteValueRISCV64_OpSignExt16to32(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (SignExt16to32 <t> x)
- // result: (SRAI [48] (SLLI <t> [48] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRAI)
- v.AuxInt = int64ToAuxInt(48)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(48)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpSignExt16to64(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (SignExt16to64 <t> x)
- // result: (SRAI [48] (SLLI <t> [48] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRAI)
- v.AuxInt = int64ToAuxInt(48)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(48)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpSignExt32to64(v *Value) bool {
- v_0 := v.Args[0]
- // match: (SignExt32to64 <t> x)
- // result: (ADDIW [0] x)
- for {
- x := v_0
- v.reset(OpRISCV64ADDIW)
- v.AuxInt = int64ToAuxInt(0)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueRISCV64_OpSignExt8to16(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (SignExt8to16 <t> x)
- // result: (SRAI [56] (SLLI <t> [56] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRAI)
- v.AuxInt = int64ToAuxInt(56)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(56)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpSignExt8to32(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (SignExt8to32 <t> x)
- // result: (SRAI [56] (SLLI <t> [56] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRAI)
- v.AuxInt = int64ToAuxInt(56)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(56)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpSignExt8to64(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (SignExt8to64 <t> x)
- // result: (SRAI [56] (SLLI <t> [56] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRAI)
- v.AuxInt = int64ToAuxInt(56)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(56)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueRISCV64_OpSlicemask(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -5000,6 +5667,22 @@ func rewriteValueRISCV64_OpZero(v *Value) bool {
return true
}
// match: (Zero [s] {t} ptr mem)
+ // cond: s%8 == 0 && s >= 16 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s >= 16 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpRISCV64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
// result: (LoweredZero [t.Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) mem)
for {
s := auxIntToInt64(v.AuxInt)
@@ -5016,108 +5699,6 @@ func rewriteValueRISCV64_OpZero(v *Value) bool {
return true
}
}
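
The new Zero rule dispatches aligned clears of 16..1024 bytes to Duff's device. Its AuxInt, 8 * (128 - s/8), is a byte offset into a 128-step duffzero body: the jump skips the steps that are not needed, which assumes each step occupies 8 bytes of code (an assumption read off the arithmetic; the diff itself does not show the duffzero body). A quick check of the offsets:

    package main

    import "fmt"

    // duffZeroEntry mirrors the rule's AuxInt: for an s-byte clear (s a
    // multiple of 8), enter past the first 128 - s/8 of the 128 steps.
    func duffZeroEntry(s int64) int64 { return 8 * (128 - s/8) }

    func main() {
        fmt.Println(duffZeroEntry(16))   // 1008: run only the last 2 steps
        fmt.Println(duffZeroEntry(1024)) // 0: run all 128 steps
    }
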
-func rewriteValueRISCV64_OpZeroExt16to32(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (ZeroExt16to32 <t> x)
- // result: (SRLI [48] (SLLI <t> [48] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRLI)
- v.AuxInt = int64ToAuxInt(48)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(48)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpZeroExt16to64(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (ZeroExt16to64 <t> x)
- // result: (SRLI [48] (SLLI <t> [48] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRLI)
- v.AuxInt = int64ToAuxInt(48)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(48)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpZeroExt32to64(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (ZeroExt32to64 <t> x)
- // result: (SRLI [32] (SLLI <t> [32] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRLI)
- v.AuxInt = int64ToAuxInt(32)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(32)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpZeroExt8to16(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (ZeroExt8to16 <t> x)
- // result: (SRLI [56] (SLLI <t> [56] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRLI)
- v.AuxInt = int64ToAuxInt(56)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(56)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpZeroExt8to32(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (ZeroExt8to32 <t> x)
- // result: (SRLI [56] (SLLI <t> [56] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRLI)
- v.AuxInt = int64ToAuxInt(56)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(56)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueRISCV64_OpZeroExt8to64(v *Value) bool {
- v_0 := v.Args[0]
- b := v.Block
- // match: (ZeroExt8to64 <t> x)
- // result: (SRLI [56] (SLLI <t> [56] x))
- for {
- t := v.Type
- x := v_0
- v.reset(OpRISCV64SRLI)
- v.AuxInt = int64ToAuxInt(56)
- v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
- v0.AuxInt = int64ToAuxInt(56)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
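
The deleted OpSignExt* and OpZeroExt* lowerings above emitted a shift pair: move the narrow value to the top of the register with SLLI, then shift back down with SRAI (arithmetic, replicating the sign bit) or SRLI (logical, filling with zeros). The diff does not show where those lowerings now live, but the identity itself is easy to verify (illustrative sketch):

    package main

    import "fmt"

    func main() {
        x := uint64(0x8001)      // low 16 bits hold -32767 as an int16
        se := int64(x<<48) >> 48 // SRAI 48 (SLLI 48 x): sign-extend 16->64
        ze := (x << 48) >> 48    // SRLI 48 (SLLI 48 x): zero-extend 16->64
        fmt.Println(se, ze)      // -32767 32769
    }
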
func rewriteBlockRISCV64(b *Block) bool {
switch b.Kind {
case BlockRISCV64BEQ:
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index 78a57c2388..35b691c12d 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -49,6 +49,9 @@ func rewriteValueS390X(v *Value) bool {
return rewriteValueS390X_OpAtomicAdd32(v)
case OpAtomicAdd64:
return rewriteValueS390X_OpAtomicAdd64(v)
+ case OpAtomicAnd32:
+ v.Op = OpS390XLAN
+ return true
case OpAtomicAnd8:
return rewriteValueS390X_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
@@ -69,6 +72,9 @@ func rewriteValueS390X(v *Value) bool {
return rewriteValueS390X_OpAtomicLoadAcq32(v)
case OpAtomicLoadPtr:
return rewriteValueS390X_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ v.Op = OpS390XLAO
+ return true
case OpAtomicOr8:
return rewriteValueS390X_OpAtomicOr8(v)
case OpAtomicStore32:
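
OpAtomicAnd32 and OpAtomicOr32 now map directly onto the s390x LAN and LAO instructions ("load and AND"/"load and OR"), which perform the whole read-modify-write atomically. Targets without such an instruction need a compare-and-swap loop for the same semantics; a sketch of the operation being provided (helper name invented for illustration):

    package demo

    import "sync/atomic"

    // atomicAnd32 shows the semantics OpAtomicAnd32 supplies: atomically
    // *p &= v. LAN does this in one instruction; CAS is the generic form.
    func atomicAnd32(p *uint32, v uint32) {
        for {
            old := atomic.LoadUint32(p)
            if atomic.CompareAndSwapUint32(p, old, old&v) {
                return
            }
        }
    }
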
diff --git a/src/cmd/compile/internal/ssa/rewrite_test.go b/src/cmd/compile/internal/ssa/rewrite_test.go
index 1a15d8c940..6fe429e85a 100644
--- a/src/cmd/compile/internal/ssa/rewrite_test.go
+++ b/src/cmd/compile/internal/ssa/rewrite_test.go
@@ -36,3 +36,184 @@ func TestSubFlags(t *testing.T) {
t.Errorf("subFlags32(0,1).ult() returned false")
}
}
+
+func TestIsPPC64WordRotateMask(t *testing.T) {
+ tests := []struct {
+ input int64
+ expected bool
+ }{
+ {0x00000001, true},
+ {0x80000001, true},
+ {0x80010001, false},
+ {0xFFFFFFFA, false},
+ {0xF0F0F0F0, false},
+ {0xFFFFFFFD, true},
+ {0x80000000, true},
+ {0x00000000, false},
+ {0xFFFFFFFF, true},
+ {0x0000FFFF, true},
+ {0xFF0000FF, true},
+ {0x00FFFF00, true},
+ }
+
+ for _, v := range tests {
+ if got := isPPC64WordRotateMask(v.input); got != v.expected {
+ t.Errorf("isPPC64WordRotateMask(0x%x) = %v, expected %v", v.input, got, v.expected)
+ }
+ }
+}
+
+func TestEncodeDecodePPC64WordRotateMask(t *testing.T) {
+ tests := []struct {
+ rotate int64
+ mask uint64
+ nbits,
+ mb,
+ me,
+ encoded int64
+ }{
+ {1, 0x00000001, 32, 31, 31, 0x20011f20},
+ {2, 0x80000001, 32, 31, 0, 0x20021f01},
+ {3, 0xFFFFFFFD, 32, 31, 29, 0x20031f1e},
+ {4, 0x80000000, 32, 0, 0, 0x20040001},
+ {5, 0xFFFFFFFF, 32, 0, 31, 0x20050020},
+ {6, 0x0000FFFF, 32, 16, 31, 0x20061020},
+ {7, 0xFF0000FF, 32, 24, 7, 0x20071808},
+ {8, 0x00FFFF00, 32, 8, 23, 0x20080818},
+
+ {9, 0x0000000000FFFF00, 64, 40, 55, 0x40092838},
+ {10, 0xFFFF000000000000, 64, 0, 15, 0x400A0010},
+ {10, 0xFFFF000000000001, 64, 63, 15, 0x400A3f10},
+ }
+
+ for i, v := range tests {
+ result := encodePPC64RotateMask(v.rotate, int64(v.mask), v.nbits)
+ if result != v.encoded {
+ t.Errorf("encodePPC64RotateMask(%d,0x%x,%d) = 0x%x, expected 0x%x", v.rotate, v.mask, v.nbits, result, v.encoded)
+ }
+ rotate, mb, me, mask := DecodePPC64RotateMask(result)
+ if rotate != v.rotate || mb != v.mb || me != v.me || mask != v.mask {
+ t.Errorf("DecodePPC64Failure(Test %d) got (%d, %d, %d, %x) expected (%d, %d, %d, %x)", i, rotate, mb, me, mask, v.rotate, v.mb, v.me, v.mask)
+ }
+ }
+}
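
The encoded values in this table pack four small fields into one AuxInt. Reading the vectors, the layout is nbits<<24 | rotate<<16 | mb<<8 | (me+1), with ME stored one bit past the end of the mask: 0x20050020 decodes to nbits=32, rotate=5, mb=0, me=31. A decoder consistent with every vector above (layout inferred from the test data, not quoted from the CL):

    package demo

    // decodeRotateMask unpacks the inferred AuxInt layout used by these tests.
    func decodeRotateMask(aux int64) (rotate, mb, me, nbits int64) {
        u := uint64(aux)
        nbits = int64((u >> 24) & 0xFF)
        rotate = int64((u >> 16) & 0xFF)
        mb = int64((u >> 8) & 0xFF)
        me = (int64(u&0xFF) - 1) & (nbits - 1) // stored one past the last mask bit
        return
    }
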
+
+func TestMergePPC64ClrlsldiSrw(t *testing.T) {
+ tests := []struct {
+ clrlsldi int32
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ // ((x>>4)&0xFF)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), 4, true, 0, 0xFF0},
+ // ((x>>4)&0xFFFF)<<4
+ {newPPC64ShiftAuxInt(4, 48, 63, 64), 4, true, 0, 0xFFFF0},
+ // ((x>>4)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), 4, false, 0, 0},
+ // ((x>>4)&0xFFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), 4, true, 12, 0xFFFF0000},
+ // ((x>>32)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64ClrlsldiSrw(int64(v.clrlsldi), v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64ClrlsldiRlwinm(t *testing.T) {
+ tests := []struct {
+ clrlsldi int32
+ rlwinm int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ // ((x<<4)&0xFF00)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(4, 0xFF00, 32), false, 0, 0},
+ // ((x>>4)&0xFF)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(28, 0x0FFFFFFF, 32), true, 0, 0xFF0},
+ // ((x>>4)&0xFFFF)<<4
+ {newPPC64ShiftAuxInt(4, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 0, 0xFFFF0},
+ // ((x>>4)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), false, 0, 0},
+ // ((x>>4)&0xFFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 12, 0xFFFF0000},
+ // ((x>>4)&0xF000FFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xF000FFFF, 32), true, 12, 0xFFFF0000},
+ }
+ for i, v := range tests {
+ result := mergePPC64ClrlsldiRlwinm(v.clrlsldi, v.rlwinm)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64SldiSrw(t *testing.T) {
+ tests := []struct {
+ sld int64
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ {4, 4, true, 0, 0xFFFFFFF0},
+ {4, 8, true, 28, 0x0FFFFFF0},
+ {0, 0, true, 0, 0xFFFFFFFF},
+ {8, 4, false, 0, 0},
+ {0, 32, false, 0, 0},
+ {0, 31, true, 1, 0x1},
+ {31, 31, true, 0, 0x80000000},
+ {32, 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64SldiSrw(v.sld, v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64SldiSrw(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64SldiSrw(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64SldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64AndSrwi(t *testing.T) {
+ tests := []struct {
+ and int64
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ {0x000000FF, 8, true, 24, 0xFF},
+ {0xF00000FF, 8, true, 24, 0xFF},
+ {0x0F0000FF, 4, false, 0, 0},
+ {0x00000000, 4, false, 0, 0},
+ {0xF0000000, 4, false, 0, 0},
+ {0xF0000000, 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64AndSrwi(v.and, v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64AndSrwi(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64AndSrwi(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64AndSrwi(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
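
These merge helpers fold a shift followed by a mask into a single rotate-under-mask, the form PPC64's rlwinm-family instructions execute directly. The first TestMergePPC64AndSrwi case, for example, asserts that (x>>8)&0xFF equals a rotate-left by 24 under mask 0xFF, which is easy to confirm (illustrative check, not from the CL):

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        x := uint32(0xDEADBEEF)
        srwAnd := (x >> 8) & 0xFF              // SRWI 8 then AND 0xFF
        rot := bits.RotateLeft32(x, 24) & 0xFF // one rotate-and-mask
        fmt.Println(srwAnd == rot, srwAnd)     // true 190 (0xbe)
    }
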
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
index 86fbc9901a..8b9753414f 100644
--- a/src/cmd/compile/internal/ssa/rewritedec64.go
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -62,6 +62,8 @@ func rewriteValuedec64(v *Value) bool {
return rewriteValuedec64_OpNeg64(v)
case OpNeq64:
return rewriteValuedec64_OpNeq64(v)
+ case OpOr32:
+ return rewriteValuedec64_OpOr32(v)
case OpOr64:
return rewriteValuedec64_OpOr64(v)
case OpRsh16Ux64:
@@ -728,7 +730,23 @@ func rewriteValuedec64_OpLsh16x64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Lsh16x64 x y)
+ // result: (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpLsh32x64(v *Value) bool {
v_1 := v.Args[1]
@@ -793,83 +811,97 @@ func rewriteValuedec64_OpLsh32x64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Lsh32x64 x y)
+ // result: (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpLsh64x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Lsh64x16 (Int64Make hi lo) s)
- // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x16 <typ.UInt32> hi s) (Rsh32Ux16 <typ.UInt32> lo (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Lsh32x16 <typ.UInt32> lo (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))) (Lsh32x16 <typ.UInt32> lo s))
+ // match: (Lsh64x16 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x16 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux16 <typ.UInt32> (Int64Lo x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Lsh32x16 <typ.UInt32> (Int64Lo x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))) (Lsh32x16 <typ.UInt32> (Int64Lo x) s))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
- v2.AddArg2(hi, s)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
- v4 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
- v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v5.AuxInt = int16ToAuxInt(32)
- v4.AddArg2(v5, s)
- v3.AddArg2(lo, v4)
- v1.AddArg2(v2, v3)
- v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
- v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
- v7.AddArg2(s, v5)
- v6.AddArg2(lo, v7)
- v0.AddArg2(v1, v6)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v7 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v7.AuxInt = int16ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
v8 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
- v8.AddArg2(lo, s)
- v.AddArg2(v0, v8)
+ v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
return true
}
- return false
}
func rewriteValuedec64_OpLsh64x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Lsh64x32 (Int64Make hi lo) s)
- // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x32 <typ.UInt32> hi s) (Rsh32Ux32 <typ.UInt32> lo (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Lsh32x32 <typ.UInt32> lo (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))) (Lsh32x32 <typ.UInt32> lo s))
+ // match: (Lsh64x32 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x32 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux32 <typ.UInt32> (Int64Lo x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Lsh32x32 <typ.UInt32> (Int64Lo x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))) (Lsh32x32 <typ.UInt32> (Int64Lo x) s))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
- v2.AddArg2(hi, s)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
- v4 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
- v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v5.AuxInt = int32ToAuxInt(32)
- v4.AddArg2(v5, s)
- v3.AddArg2(lo, v4)
- v1.AddArg2(v2, v3)
- v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
- v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
- v7.AddArg2(s, v5)
- v6.AddArg2(lo, v7)
- v0.AddArg2(v1, v6)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
- v8.AddArg2(lo, s)
- v.AddArg2(v0, v8)
+ v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
return true
}
- return false
}
func rewriteValuedec64_OpLsh64x64(v *Value) bool {
v_1 := v.Args[1]
@@ -934,45 +966,60 @@ func rewriteValuedec64_OpLsh64x64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Lsh64x64 x y)
+ // result: (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpLsh64x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Lsh64x8 (Int64Make hi lo) s)
- // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x8 <typ.UInt32> hi s) (Rsh32Ux8 <typ.UInt32> lo (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Lsh32x8 <typ.UInt32> lo (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))) (Lsh32x8 <typ.UInt32> lo s))
+ // match: (Lsh64x8 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x8 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux8 <typ.UInt32> (Int64Lo x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Lsh32x8 <typ.UInt32> (Int64Lo x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))) (Lsh32x8 <typ.UInt32> (Int64Lo x) s))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
v2 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
- v2.AddArg2(hi, s)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
- v4 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
- v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v5.AuxInt = int8ToAuxInt(32)
- v4.AddArg2(v5, s)
- v3.AddArg2(lo, v4)
- v1.AddArg2(v2, v3)
- v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
- v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
- v7.AddArg2(s, v5)
- v6.AddArg2(lo, v7)
- v0.AddArg2(v1, v6)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v7 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v7.AuxInt = int8ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
v8 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
- v8.AddArg2(lo, s)
- v.AddArg2(v0, v8)
+ v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
return true
}
- return false
}
func rewriteValuedec64_OpLsh8x64(v *Value) bool {
v_1 := v.Args[1]
@@ -1037,7 +1084,23 @@ func rewriteValuedec64_OpLsh8x64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Lsh8x64 x y)
+ // result: (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpMul64(v *Value) bool {
v_1 := v.Args[1]
@@ -1118,6 +1181,64 @@ func rewriteValuedec64_OpNeq64(v *Value) bool {
return true
}
}
+func rewriteValuedec64_OpOr32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Or32 <typ.UInt32> (Zeromask (Const32 [c])) y)
+ // cond: c == 0
+ // result: y
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpZeromask {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ y := v_1
+ if !(c == 0) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Or32 <typ.UInt32> (Zeromask (Const32 [c])) y)
+ // cond: c != 0
+ // result: (Const32 <typ.UInt32> [-1])
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpZeromask {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if !(c != 0) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.Type = typ.UInt32
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ return false
+}
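
The catch-all shift rules added throughout this file lower a 64-bit shift count on 32-bit targets to Or32 (Zeromask (Int64Hi y)) (Int64Lo y): Zeromask is all-ones when its argument is nonzero, so any count of 2^32 or more collapses to 0xFFFFFFFF, which the 32-bit shift lowering still treats as an over-shift. The OpOr32 rules above constant-fold exactly that pattern when the high word is known. A sketch of the semantics (helper names invented for illustration):

    package demo

    // zeromask mirrors SSA Zeromask: 0 if x == 0, all ones otherwise.
    func zeromask(x uint32) uint32 {
        if x == 0 {
            return 0
        }
        return 0xFFFFFFFF
    }

    // shiftCount32 narrows a 64-bit shift count to 32 bits: counts that do
    // not fit saturate to 0xFFFFFFFF, which still means "shift everything out".
    func shiftCount32(y uint64) uint32 {
        return zeromask(uint32(y>>32)) | uint32(y)
    }
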
func rewriteValuedec64_OpOr64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -1208,7 +1329,23 @@ func rewriteValuedec64_OpRsh16Ux64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Rsh16Ux64 x y)
+ // result: (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh16Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpRsh16x64(v *Value) bool {
v_1 := v.Args[1]
@@ -1276,7 +1413,23 @@ func rewriteValuedec64_OpRsh16x64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Rsh16x64 x y)
+ // result: (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpRsh32Ux64(v *Value) bool {
v_1 := v.Args[1]
@@ -1341,7 +1494,23 @@ func rewriteValuedec64_OpRsh32Ux64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Rsh32Ux64 x y)
+ // result: (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh32Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpRsh32x64(v *Value) bool {
v_1 := v.Args[1]
@@ -1407,83 +1576,97 @@ func rewriteValuedec64_OpRsh32x64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Rsh32x64 x y)
+ // result: (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpRsh64Ux16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Rsh64Ux16 (Int64Make hi lo) s)
- // result: (Int64Make (Rsh32Ux16 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> lo s) (Lsh32x16 <typ.UInt32> hi (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Rsh32Ux16 <typ.UInt32> hi (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
+ // match: (Rsh64Ux16 x s)
+ // result: (Int64Make (Rsh32Ux16 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s) (Lsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Rsh32Ux16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
- v0.AddArg2(hi, s)
- v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
- v3.AddArg2(lo, s)
- v4 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
- v5 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
- v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v6.AuxInt = int16ToAuxInt(32)
- v5.AddArg2(v6, s)
- v4.AddArg2(hi, v5)
- v2.AddArg2(v3, v4)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
- v8 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
- v8.AddArg2(s, v6)
- v7.AddArg2(hi, v8)
- v1.AddArg2(v2, v7)
- v.AddArg2(v0, v1)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v8.AuxInt = int16ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
return true
}
- return false
}
func rewriteValuedec64_OpRsh64Ux32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Rsh64Ux32 (Int64Make hi lo) s)
- // result: (Int64Make (Rsh32Ux32 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> lo s) (Lsh32x32 <typ.UInt32> hi (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Rsh32Ux32 <typ.UInt32> hi (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
+ // match: (Rsh64Ux32 x s)
+ // result: (Int64Make (Rsh32Ux32 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s) (Lsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Rsh32Ux32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
- v0.AddArg2(hi, s)
- v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
- v3.AddArg2(lo, s)
- v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
- v5 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
- v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v6.AuxInt = int32ToAuxInt(32)
- v5.AddArg2(v6, s)
- v4.AddArg2(hi, v5)
- v2.AddArg2(v3, v4)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
- v8 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
- v8.AddArg2(s, v6)
- v7.AddArg2(hi, v8)
- v1.AddArg2(v2, v7)
- v.AddArg2(v0, v1)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
return true
}
- return false
}
func rewriteValuedec64_OpRsh64Ux64(v *Value) bool {
v_1 := v.Args[1]
@@ -1548,139 +1731,152 @@ func rewriteValuedec64_OpRsh64Ux64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Rsh64Ux64 x y)
+ // result: (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpRsh64Ux8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Rsh64Ux8 (Int64Make hi lo) s)
- // result: (Int64Make (Rsh32Ux8 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> lo s) (Lsh32x8 <typ.UInt32> hi (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Rsh32Ux8 <typ.UInt32> hi (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
+ // match: (Rsh64Ux8 x s)
+ // result: (Int64Make (Rsh32Ux8 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s) (Lsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Rsh32Ux8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
- v0.AddArg2(hi, s)
- v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
- v3.AddArg2(lo, s)
- v4 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
- v5 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
- v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v6.AuxInt = int8ToAuxInt(32)
- v5.AddArg2(v6, s)
- v4.AddArg2(hi, v5)
- v2.AddArg2(v3, v4)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
- v8 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
- v8.AddArg2(s, v6)
- v7.AddArg2(hi, v8)
- v1.AddArg2(v2, v7)
- v.AddArg2(v0, v1)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v8.AuxInt = int8ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
return true
}
- return false
}
func rewriteValuedec64_OpRsh64x16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Rsh64x16 (Int64Make hi lo) s)
- // result: (Int64Make (Rsh32x16 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> lo s) (Lsh32x16 <typ.UInt32> hi (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (And32 <typ.UInt32> (Rsh32x16 <typ.UInt32> hi (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
+ // match: (Rsh64x16 x s)
+ // result: (Int64Make (Rsh32x16 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s) (Lsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (And32 <typ.UInt32> (Rsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
- v0.AddArg2(hi, s)
- v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
- v3.AddArg2(lo, s)
- v4 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
- v5 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
- v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
- v6.AuxInt = int16ToAuxInt(32)
- v5.AddArg2(v6, s)
- v4.AddArg2(hi, v5)
- v2.AddArg2(v3, v4)
- v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
- v8 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
- v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
- v9.AddArg2(s, v6)
- v8.AddArg2(hi, v9)
- v10 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
- v11 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
- v12 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16)
- v13 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v13.AuxInt = int32ToAuxInt(5)
- v12.AddArg2(s, v13)
- v11.AddArg(v12)
- v10.AddArg(v11)
- v7.AddArg2(v8, v10)
- v1.AddArg2(v2, v7)
- v.AddArg2(v0, v1)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v8.AuxInt = int16ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16)
+ v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v15.AuxInt = int32ToAuxInt(5)
+ v14.AddArg2(s, v15)
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
return true
}
- return false
}
func rewriteValuedec64_OpRsh64x32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Rsh64x32 (Int64Make hi lo) s)
- // result: (Int64Make (Rsh32x32 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> lo s) (Lsh32x32 <typ.UInt32> hi (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (And32 <typ.UInt32> (Rsh32x32 <typ.UInt32> hi (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))) (Zeromask (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
+ // match: (Rsh64x32 x s)
+ // result: (Int64Make (Rsh32x32 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s) (Lsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (And32 <typ.UInt32> (Rsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))) (Zeromask (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
- v0.AddArg2(hi, s)
- v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
- v3.AddArg2(lo, s)
- v4 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
- v5 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
- v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v6.AuxInt = int32ToAuxInt(32)
- v5.AddArg2(v6, s)
- v4.AddArg2(hi, v5)
- v2.AddArg2(v3, v4)
- v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
- v8 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
- v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
- v9.AddArg2(s, v6)
- v8.AddArg2(hi, v9)
- v10 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
- v11 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
- v12 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v12.AuxInt = int32ToAuxInt(5)
- v11.AddArg2(s, v12)
- v10.AddArg(v11)
- v7.AddArg2(v8, v10)
- v1.AddArg2(v2, v7)
- v.AddArg2(v0, v1)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v14.AuxInt = int32ToAuxInt(5)
+ v13.AddArg2(s, v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
return true
}
- return false
}
func rewriteValuedec64_OpRsh64x64(v *Value) bool {
v_1 := v.Args[1]
@@ -1750,55 +1946,70 @@ func rewriteValuedec64_OpRsh64x64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Rsh64x64 x y)
+ // result: (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpRsh64x8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (Rsh64x8 (Int64Make hi lo) s)
- // result: (Int64Make (Rsh32x8 <typ.UInt32> hi s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> lo s) (Lsh32x8 <typ.UInt32> hi (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (And32 <typ.UInt32> (Rsh32x8 <typ.UInt32> hi (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
+ // match: (Rsh64x8 x s)
+ // result: (Int64Make (Rsh32x8 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s) (Lsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (And32 <typ.UInt32> (Rsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
for {
- if v_0.Op != OpInt64Make {
- break
- }
- lo := v_0.Args[1]
- hi := v_0.Args[0]
+ x := v_0
s := v_1
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
- v0.AddArg2(hi, s)
- v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
- v3.AddArg2(lo, s)
- v4 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
- v5 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
- v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
- v6.AuxInt = int8ToAuxInt(32)
- v5.AddArg2(v6, s)
- v4.AddArg2(hi, v5)
- v2.AddArg2(v3, v4)
- v7 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
- v8 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
- v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
- v9.AddArg2(s, v6)
- v8.AddArg2(hi, v9)
- v10 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
- v11 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
- v12 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8)
- v13 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
- v13.AuxInt = int32ToAuxInt(5)
- v12.AddArg2(s, v13)
- v11.AddArg(v12)
- v10.AddArg(v11)
- v7.AddArg2(v8, v10)
- v1.AddArg2(v2, v7)
- v.AddArg2(v0, v1)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v8.AuxInt = int8ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8)
+ v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v15.AuxInt = int32ToAuxInt(5)
+ v14.AddArg2(s, v15)
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
return true
}
- return false
}
func rewriteValuedec64_OpRsh8Ux64(v *Value) bool {
v_1 := v.Args[1]
@@ -1863,7 +2074,23 @@ func rewriteValuedec64_OpRsh8Ux64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Rsh8Ux64 x y)
+ // result: (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh8Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpRsh8x64(v *Value) bool {
v_1 := v.Args[1]
@@ -1931,7 +2158,23 @@ func rewriteValuedec64_OpRsh8x64(v *Value) bool {
v.AddArg2(x, v0)
return true
}
- return false
+ // match: (Rsh8x64 x y)
+ // result: (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
}
func rewriteValuedec64_OpSignExt16to64(v *Value) bool {
v_0 := v.Args[0]
@@ -2071,6 +2314,8 @@ func rewriteValuedec64_OpSub64(v *Value) bool {
}
func rewriteValuedec64_OpTrunc64to16(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (Trunc64to16 (Int64Make _ lo))
// result: (Trunc32to16 lo)
for {
@@ -2082,7 +2327,16 @@ func rewriteValuedec64_OpTrunc64to16(v *Value) bool {
v.AddArg(lo)
return true
}
- return false
+ // match: (Trunc64to16 x)
+ // result: (Trunc32to16 (Int64Lo x))
+ for {
+ x := v_0
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
}
func rewriteValuedec64_OpTrunc64to32(v *Value) bool {
v_0 := v.Args[0]
@@ -2096,10 +2350,19 @@ func rewriteValuedec64_OpTrunc64to32(v *Value) bool {
v.copyOf(lo)
return true
}
- return false
+ // match: (Trunc64to32 x)
+ // result: (Int64Lo x)
+ for {
+ x := v_0
+ v.reset(OpInt64Lo)
+ v.AddArg(x)
+ return true
+ }
}
func rewriteValuedec64_OpTrunc64to8(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (Trunc64to8 (Int64Make _ lo))
// result: (Trunc32to8 lo)
for {
@@ -2111,7 +2374,16 @@ func rewriteValuedec64_OpTrunc64to8(v *Value) bool {
v.AddArg(lo)
return true
}
- return false
+ // match: (Trunc64to8 x)
+ // result: (Trunc32to8 (Int64Lo x))
+ for {
+ x := v_0
+ v.reset(OpTrunc32to8)
+ v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
}
func rewriteValuedec64_OpXor64(v *Value) bool {
v_1 := v.Args[1]
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 925ff53fd1..11f4cc7c58 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -124,6 +124,8 @@ func rewriteValuegeneric(v *Value) bool {
return rewriteValuegeneric_OpIMake(v)
case OpInterCall:
return rewriteValuegeneric_OpInterCall(v)
+ case OpInterLECall:
+ return rewriteValuegeneric_OpInterLECall(v)
case OpIsInBounds:
return rewriteValuegeneric_OpIsInBounds(v)
case OpIsNonNil:
@@ -366,6 +368,8 @@ func rewriteValuegeneric(v *Value) bool {
return rewriteValuegeneric_OpSelect0(v)
case OpSelect1:
return rewriteValuegeneric_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValuegeneric_OpSelectN(v)
case OpSignExt16to32:
return rewriteValuegeneric_OpSignExt16to32(v)
case OpSignExt16to64:
@@ -8522,6 +8526,46 @@ func rewriteValuegeneric_OpInterCall(v *Value) bool {
}
return false
}
+func rewriteValuegeneric_OpInterLECall(v *Value) bool {
+ // match: (InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___)
+ // cond: devirtLESym(v, auxCall, itab, off) != nil
+ // result: devirtLECall(v, devirtLESym(v, auxCall, itab, off))
+ for {
+ if len(v.Args) < 1 {
+ break
+ }
+ auxCall := auxToCall(v.Aux)
+ v_0 := v.Args[0]
+ if v_0.Op != OpLoad {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0_0.AuxInt)
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpITab {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpIMake {
+ break
+ }
+ v_0_0_0_0_0 := v_0_0_0_0.Args[0]
+ if v_0_0_0_0_0.Op != OpAddr {
+ break
+ }
+ itab := auxToSym(v_0_0_0_0_0.Aux)
+ v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
+ if v_0_0_0_0_0_0.Op != OpSB || !(devirtLESym(v, auxCall, itab, off) != nil) {
+ break
+ }
+ v.copyOf(devirtLECall(v, devirtLESym(v, auxCall, itab, off)))
+ return true
+ }
+ return false
+}
func rewriteValuegeneric_OpIsInBounds(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
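This rule is the late-expanded (LECall) counterpart of the existing OpInterCall devirtualization: when the receiver's itab is a statically known Addr, devirtLESym resolves the concrete method and devirtLECall turns the indirect call into a direct StaticLECall. A hedged sketch of code this can devirtualize (names are illustrative):

    package demo

    type greeter interface{ greet() string }

    type english struct{}

    func (english) greet() string { return "hello" }

    func main() {
    	// g's itab is a known static symbol here, so the interface
    	// call below can be rewritten into a direct call to
    	// english.greet once the rule above fires.
    	var g greeter = english{}
    	println(g.greet())
    }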
@@ -16082,6 +16126,38 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
v.reset(OpInvalid)
return true
}
+ // match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) (SelectN [1] call))
+ // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: (Invalid)
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 || v_1.Op != OpSelectN || auxIntToInt64(v_1.AuxInt) != 1 || call != v_1.Args[0] || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.reset(OpInvalid)
+ return true
+ }
+ // match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) (SelectN [1] call))
+ // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: (Invalid)
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 || v_1.Op != OpSelectN || auxIntToInt64(v_1.AuxInt) != 1 || call != v_1.Args[0] || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.reset(OpInvalid)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpNot(v *Value) bool {
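runtime.newobject never returns nil, so a nil check on its result, or on an OffPtr derived from it, is dead code; the two new rules extend the existing StaticCall patterns to the StaticLECall form. A small sketch of code whose generated nil check these rules can remove (names illustrative):

    package demo

    type node struct {
    	next *node
    	val  int
    }

    func fresh() *node {
    	// new(node) lowers to a runtime.newobject call, which cannot
    	// return nil, so the nil check guarding the store below is
    	// removable by the rules above.
    	n := new(node)
    	n.val = 1
    	return n
    }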
@@ -18549,6 +18625,9 @@ func rewriteValuegeneric_OpPhi(v *Value) bool {
// match: (Phi (Const8 [c]) (Const8 [c]))
// result: (Const8 [c])
for {
+ if len(v.Args) != 2 {
+ break
+ }
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpConst8 {
@@ -18556,7 +18635,7 @@ func rewriteValuegeneric_OpPhi(v *Value) bool {
}
c := auxIntToInt8(v_0.AuxInt)
v_1 := v.Args[1]
- if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c || len(v.Args) != 2 {
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c {
break
}
v.reset(OpConst8)
@@ -18566,6 +18645,9 @@ func rewriteValuegeneric_OpPhi(v *Value) bool {
// match: (Phi (Const16 [c]) (Const16 [c]))
// result: (Const16 [c])
for {
+ if len(v.Args) != 2 {
+ break
+ }
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpConst16 {
@@ -18573,7 +18655,7 @@ func rewriteValuegeneric_OpPhi(v *Value) bool {
}
c := auxIntToInt16(v_0.AuxInt)
v_1 := v.Args[1]
- if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c || len(v.Args) != 2 {
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c {
break
}
v.reset(OpConst16)
@@ -18583,6 +18665,9 @@ func rewriteValuegeneric_OpPhi(v *Value) bool {
// match: (Phi (Const32 [c]) (Const32 [c]))
// result: (Const32 [c])
for {
+ if len(v.Args) != 2 {
+ break
+ }
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
@@ -18590,7 +18675,7 @@ func rewriteValuegeneric_OpPhi(v *Value) bool {
}
c := auxIntToInt32(v_0.AuxInt)
v_1 := v.Args[1]
- if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c || len(v.Args) != 2 {
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c {
break
}
v.reset(OpConst32)
@@ -18600,6 +18685,9 @@ func rewriteValuegeneric_OpPhi(v *Value) bool {
// match: (Phi (Const64 [c]) (Const64 [c]))
// result: (Const64 [c])
for {
+ if len(v.Args) != 2 {
+ break
+ }
_ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
@@ -18607,7 +18695,7 @@ func rewriteValuegeneric_OpPhi(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
v_1 := v.Args[1]
- if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || len(v.Args) != 2 {
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c {
break
}
v.reset(OpConst64)
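The four Phi hunks above hoist the len(v.Args) check ahead of the argument reads: a Phi carries one argument per predecessor, so touching v.Args[1] before confirming the arity could index out of range on single-predecessor phis. A minimal sketch of the corrected ordering (toy code, not from the CL):

    package demo

    // twoEqualArgs mirrors the guard ordering: validate the length
    // first; only then is it safe to read both elements.
    func twoEqualArgs(args []int) (int, bool) {
    	if len(args) != 2 {
    		return 0, false
    	}
    	if args[0] != args[1] {
    		return 0, false
    	}
    	return args[0], true
    }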
@@ -20615,6 +20703,70 @@ func rewriteValuegeneric_OpSelect1(v *Value) bool {
}
return false
}
+func rewriteValuegeneric_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(dst.Type.Elem())
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const32 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(dst.Type.Elem())
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
func rewriteValuegeneric_OpSignExt16to32(v *Value) bool {
v_0 := v.Args[0]
// match: (SignExt16to32 (Const16 [c]))
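These two rules recognize the late-expanded form of a runtime.memmove call whose length is a constant (Const64, or Const32 on 32-bit targets) and, when isInlinableMemmove reports the copy is cheap enough, replace the call with a plain SSA Move. A sketch of code that can trigger the rewrite (names illustrative):

    package demo

    // copyHeader copies a fixed 8-byte prefix. Both slice lengths are
    // constant, so copy lowers to runtime.memmove with a constant size
    // that the rules above can turn into a Move instead of a call.
    func copyHeader(dst, src []byte) {
    	copy(dst[:8], src[:8])
    }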
@@ -21660,6 +21812,48 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
v.copyOf(mem)
return true
}
+ // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ x := v_1
+ mem := v_2
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ x := v_1
+ mem := v_2
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
// match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem)))
// cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)
// result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
@@ -24357,6 +24551,24 @@ func rewriteValuegeneric_OpZero(v *Value) bool {
v.copyOf(mem)
return true
}
+ // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
+ // cond: isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ mem := v_1
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
// match: (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
// cond: isSamePtr(p1, p2) && store.Uses == 1 && n >= o2 + t2.Size() && clobber(store)
// result: (Zero {t1} [n] p1 mem)
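Memory returned by runtime.newobject is already zeroed, so a Store of a constant zero, or a whole-object Zero, into that allocation is redundant; the new Store and Zero rules fold such writes away in the StaticLECall form, matching the existing StaticCall rules. A sketch (names illustrative):

    package demo

    type point struct{ x, y int }

    func origin() *point {
    	// runtime.newobject returns zeroed memory, so both explicit
    	// zero stores below are redundant and can be elided.
    	p := new(point)
    	p.x = 0
    	p.y = 0
    	return p
    }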
diff --git a/src/cmd/compile/internal/ssa/shortcircuit.go b/src/cmd/compile/internal/ssa/shortcircuit.go
index c5df457c4e..7b4ee2e81c 100644
--- a/src/cmd/compile/internal/ssa/shortcircuit.go
+++ b/src/cmd/compile/internal/ssa/shortcircuit.go
@@ -261,11 +261,6 @@ func shortcircuitBlock(b *Block) bool {
// and the CFG modifications must not proceed.
// The returned function assumes that shortcircuitBlock has completed its CFG modifications.
func shortcircuitPhiPlan(b *Block, ctl *Value, cidx int, ti int64) func(*Value, int) {
- const go115shortcircuitPhis = true
- if !go115shortcircuitPhis {
- return nil
- }
-
// t is the "taken" branch: the successor we always go to when coming in from p.
t := b.Succs[ti].b
// u is the "untaken" branch: the successor we never go to when coming in from p.
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index 94b8763d5d..edc43aaae7 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -348,6 +348,9 @@ func (v *Value) reset(op Op) {
// It modifies v to be (Copy a).
//go:noinline
func (v *Value) copyOf(a *Value) {
+ if v == a {
+ return
+ }
if v.InCache {
v.Block.Func.unCache(v)
}
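The new guard makes a self-copy a no-op. A rewrite can now resolve a value to itself (for instance, devirtLECall rewrites v in place and returns it); without the early return, copyOf would reset v and then make it a Copy of itself, i.e. its own argument. A toy sketch of the hazard, with stand-in types:

    package demo

    type value struct {
    	op   string
    	args []*value
    }

    // copyOf mirrors (*Value).copyOf: rewriting v into (Copy v) would
    // create a self-referential value, so v == a must return early.
    func copyOf(v, a *value) {
    	if v == a {
    		return
    	}
    	v.op = "Copy"
    	v.args = []*value{a}
    }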
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index df54a45b0f..849c9e8967 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -527,7 +527,7 @@ func IsStackAddr(v *Value) bool {
v = v.Args[0]
}
switch v.Op {
- case OpSP, OpLocalAddr:
+ case OpSP, OpLocalAddr, OpSelectNAddr:
return true
}
return false
@@ -593,7 +593,7 @@ func IsSanitizerSafeAddr(v *Value) bool {
v = v.Args[0]
}
switch v.Op {
- case OpSP, OpLocalAddr:
+ case OpSP, OpLocalAddr, OpSelectNAddr:
// Stack addresses are always safe.
return true
case OpITab, OpStringPtr, OpGetClosurePtr:
@@ -609,7 +609,7 @@ func IsSanitizerSafeAddr(v *Value) bool {
// isVolatile reports whether v is a pointer to argument region on stack which
// will be clobbered by a function call.
func isVolatile(v *Value) bool {
- for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr {
v = v.Args[0]
}
return v.Op == OpSP
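With the late call expansion, the address of a call's result slot appears as an OpSelectNAddr value. Result slots live on the stack, so these addresses are stack addresses for write-barrier and sanitizer purposes, and a pointer derived from one is volatile across the next call, hence the three additions above. A sketch of code where such an address can arise (names illustrative; //go:noinline keeps the call from being inlined away):

    package demo

    type pair struct{ p, q *int }

    //go:noinline
    func makePair(a, b *int) pair { return pair{a, b} }

    func use(a, b *int) pair {
    	// The aggregate result of makePair may be addressed through a
    	// SelectNAddr at this call site; it is a stack slot, so storing
    	// it needs neither a write barrier nor sanitizer checks.
    	return makePair(a, b)
    }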
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index 9601fab9e0..1485b70059 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -287,6 +287,7 @@ func tokstring(tok token) string {
// Convenience methods using the current token position.
func (p *parser) pos() Pos { return p.posAt(p.line, p.col) }
+func (p *parser) error(msg string) { p.errorAt(p.pos(), msg) }
func (p *parser) syntaxError(msg string) { p.syntaxErrorAt(p.pos(), msg) }
// The stopset contains keywords that start a statement.
@@ -997,17 +998,20 @@ loop:
// x[i:j...
t.Index[1] = p.expr()
}
- if p.got(_Colon) {
+ if p.tok == _Colon {
t.Full = true
// x[i:j:...]
if t.Index[1] == nil {
p.error("middle index required in 3-index slice")
+ t.Index[1] = p.badExpr()
}
+ p.next()
if p.tok != _Rbrack {
// x[i:j:k...
t.Index[2] = p.expr()
} else {
p.error("final index required in 3-index slice")
+ t.Index[2] = p.badExpr()
}
}
p.want(_Rbrack)
@@ -1836,6 +1840,7 @@ func (p *parser) header(keyword token) (init SimpleStmt, cond Expr, post SimpleS
if p.tok == _Lbrace {
if keyword == _If {
p.syntaxError("missing condition in if statement")
+ cond = p.badExpr()
}
return
}
@@ -1907,6 +1912,9 @@ done:
} else {
p.syntaxErrorAt(semi.pos, "missing condition in if statement")
}
+ b := new(BadExpr)
+ b.pos = semi.pos
+ cond = b
}
case *ExprStmt:
cond = s.X
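The parser changes substitute a BadExpr wherever a required expression is missing (the middle or final index of a 3-index slice, or the condition of an if) instead of leaving a nil node for later phases to trip over; checking p.tok == _Colon before calling p.next() also reports the missing-index error at the colon's position. Illustrative inputs (deliberately invalid Go) that exercise the new recovery paths:

    package demo

    func bad(s []int, i, j int) {
    	_ = s[i:j:] // "final index required in 3-index slice":
    	            // Index[2] now becomes a BadExpr, not nil
    	if x := 0; { // "missing condition in if statement":
    		_ = x // cond becomes a BadExpr at the semicolon
    	}
    }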
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 5d1d5d4008..023ab9af88 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -105,14 +105,14 @@ var (
Errortype *Type
// Types to represent untyped string and boolean constants.
- Idealstring *Type
- Idealbool *Type
+ UntypedString *Type
+ UntypedBool *Type
// Types to represent untyped numeric constants.
- Idealint = New(TIDEAL)
- Idealrune = New(TIDEAL)
- Idealfloat = New(TIDEAL)
- Idealcomplex = New(TIDEAL)
+ UntypedInt = New(TIDEAL)
+ UntypedRune = New(TIDEAL)
+ UntypedFloat = New(TIDEAL)
+ UntypedComplex = New(TIDEAL)
)
// A Type represents a Go type.
@@ -1436,7 +1436,7 @@ func (t *Type) IsUntyped() bool {
if t == nil {
return false
}
- if t == Idealstring || t == Idealbool {
+ if t == UntypedString || t == UntypedBool {
return true
}
switch t.Etype {
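The rename aligns the compiler's internal names with the spec's term "untyped" for constant types; behavior is unchanged. For reference, a sketch of what these types describe (constant names illustrative):

    package demo

    const big = 1 << 40 // untyped integer constant: types.UntypedInt

    func demo() {
    	var f float64 = big // an untyped constant adapts to its context
    	const s = "hi"      // types.UntypedString until it is given a type
    	_ = f
    	_ = s
    }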
diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go
deleted file mode 100644
index 594adb2cd5..0000000000
--- a/src/cmd/compile/internal/x86/387.go
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x86
-
-import (
- "cmd/compile/internal/gc"
- "cmd/compile/internal/ssa"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/obj/x86"
- "math"
-)
-
-// Generates code for v using 387 instructions.
-func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) {
- // The SSA compiler pretends that it has an SSE backend.
- // If we don't have one of those, we need to translate
- // all the SSE ops to equivalent 387 ops. That's what this
- // function does.
-
- switch v.Op {
- case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
- iv := uint64(v.AuxInt)
- if iv == 0x0000000000000000 { // +0.0
- s.Prog(x86.AFLDZ)
- } else if iv == 0x3ff0000000000000 { // +1.0
- s.Prog(x86.AFLD1)
- } else if iv == 0x8000000000000000 { // -0.0
- s.Prog(x86.AFLDZ)
- s.Prog(x86.AFCHS)
- } else if iv == 0xbff0000000000000 { // -1.0
- s.Prog(x86.AFLD1)
- s.Prog(x86.AFCHS)
- } else if iv == 0x400921fb54442d18 { // +pi
- s.Prog(x86.AFLDPI)
- } else if iv == 0xc00921fb54442d18 { // -pi
- s.Prog(x86.AFLDPI)
- s.Prog(x86.AFCHS)
- } else { // others
- p := s.Prog(loadPush(v.Type))
- p.From.Type = obj.TYPE_FCONST
- p.From.Val = math.Float64frombits(iv)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_F0
- }
- popAndSave(s, v)
-
- case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
- p := s.Prog(loadPush(v.Type))
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = v.Args[0].Reg()
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_F0
- popAndSave(s, v)
-
- case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, ssa.Op386MOVSSloadidx4, ssa.Op386MOVSDloadidx8:
- p := s.Prog(loadPush(v.Type))
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
- switch v.Op {
- case ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
- p.From.Scale = 1
- p.From.Index = v.Args[1].Reg()
- if p.From.Index == x86.REG_SP {
- p.From.Reg, p.From.Index = p.From.Index, p.From.Reg
- }
- case ssa.Op386MOVSSloadidx4:
- p.From.Scale = 4
- p.From.Index = v.Args[1].Reg()
- case ssa.Op386MOVSDloadidx8:
- p.From.Scale = 8
- p.From.Index = v.Args[1].Reg()
- }
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_F0
- popAndSave(s, v)
-
- case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore:
- // Push to-be-stored value on top of stack.
- push(s, v.Args[1])
-
- // Pop and store value.
- var op obj.As
- switch v.Op {
- case ssa.Op386MOVSSstore:
- op = x86.AFMOVFP
- case ssa.Op386MOVSDstore:
- op = x86.AFMOVDP
- }
- p := s.Prog(op)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
-
- case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVSDstoreidx8:
- push(s, v.Args[2])
- var op obj.As
- switch v.Op {
- case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSSstoreidx4:
- op = x86.AFMOVFP
- case ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8:
- op = x86.AFMOVDP
- }
- p := s.Prog(op)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
- switch v.Op {
- case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
- p.To.Scale = 1
- p.To.Index = v.Args[1].Reg()
- if p.To.Index == x86.REG_SP {
- p.To.Reg, p.To.Index = p.To.Index, p.To.Reg
- }
- case ssa.Op386MOVSSstoreidx4:
- p.To.Scale = 4
- p.To.Index = v.Args[1].Reg()
- case ssa.Op386MOVSDstoreidx8:
- p.To.Scale = 8
- p.To.Index = v.Args[1].Reg()
- }
-
- case ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
- ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD:
- if v.Reg() != v.Args[0].Reg() {
- v.Fatalf("input[0] and output not in same register %s", v.LongString())
- }
-
- // Push arg1 on top of stack
- push(s, v.Args[1])
-
- // Set precision if needed. 64 bits is the default.
- switch v.Op {
- case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
- // Save AX so we can use it as scratch space.
- p := s.Prog(x86.AMOVL)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_AX
- s.AddrScratch(&p.To)
- // Install a 32-bit version of the control word.
- installControlWord(s, gc.ControlWord32, x86.REG_AX)
- // Restore AX.
- p = s.Prog(x86.AMOVL)
- s.AddrScratch(&p.From)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_AX
- }
-
- var op obj.As
- switch v.Op {
- case ssa.Op386ADDSS, ssa.Op386ADDSD:
- op = x86.AFADDDP
- case ssa.Op386SUBSS, ssa.Op386SUBSD:
- op = x86.AFSUBDP
- case ssa.Op386MULSS, ssa.Op386MULSD:
- op = x86.AFMULDP
- case ssa.Op386DIVSS, ssa.Op386DIVSD:
- op = x86.AFDIVDP
- }
- p := s.Prog(op)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = s.SSEto387[v.Reg()] + 1
-
- // Restore precision if needed.
- switch v.Op {
- case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
- restoreControlWord(s)
- }
-
- case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
- push(s, v.Args[0])
-
- // Compare.
- p := s.Prog(x86.AFUCOMP)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = s.SSEto387[v.Args[1].Reg()] + 1
-
- // Save AX.
- p = s.Prog(x86.AMOVL)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_AX
- s.AddrScratch(&p.To)
-
- // Move status word into AX.
- p = s.Prog(x86.AFSTSW)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_AX
-
- // Then move the flags we need to the integer flags.
- s.Prog(x86.ASAHF)
-
- // Restore AX.
- p = s.Prog(x86.AMOVL)
- s.AddrScratch(&p.From)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_AX
-
- case ssa.Op386SQRTSD:
- push(s, v.Args[0])
- s.Prog(x86.AFSQRT)
- popAndSave(s, v)
-
- case ssa.Op386FCHS:
- push(s, v.Args[0])
- s.Prog(x86.AFCHS)
- popAndSave(s, v)
-
- case ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD:
- p := s.Prog(x86.AMOVL)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = v.Args[0].Reg()
- s.AddrScratch(&p.To)
- p = s.Prog(x86.AFMOVL)
- s.AddrScratch(&p.From)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_F0
- popAndSave(s, v)
-
- case ssa.Op386CVTTSD2SL, ssa.Op386CVTTSS2SL:
- push(s, v.Args[0])
-
- // Load control word which truncates (rounds towards zero).
- installControlWord(s, gc.ControlWord64trunc, v.Reg())
-
- // Now do the conversion.
- p := s.Prog(x86.AFMOVLP)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- s.AddrScratch(&p.To)
- p = s.Prog(x86.AMOVL)
- s.AddrScratch(&p.From)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = v.Reg()
-
- // Restore control word.
- restoreControlWord(s)
-
- case ssa.Op386CVTSS2SD:
- // float32 -> float64 is a nop
- push(s, v.Args[0])
- popAndSave(s, v)
-
- case ssa.Op386CVTSD2SS:
- // Round to nearest float32.
- push(s, v.Args[0])
- p := s.Prog(x86.AFMOVFP)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- s.AddrScratch(&p.To)
- p = s.Prog(x86.AFMOVF)
- s.AddrScratch(&p.From)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_F0
- popAndSave(s, v)
-
- case ssa.OpLoadReg:
- if !v.Type.IsFloat() {
- ssaGenValue(s, v)
- return
- }
- // Load+push the value we need.
- p := s.Prog(loadPush(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_F0
- // Move the value to its assigned register.
- popAndSave(s, v)
-
- case ssa.OpStoreReg:
- if !v.Type.IsFloat() {
- ssaGenValue(s, v)
- return
- }
- push(s, v.Args[0])
- var op obj.As
- switch v.Type.Size() {
- case 4:
- op = x86.AFMOVFP
- case 8:
- op = x86.AFMOVDP
- }
- p := s.Prog(op)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- gc.AddrAuto(&p.To, v)
-
- case ssa.OpCopy:
- if !v.Type.IsFloat() {
- ssaGenValue(s, v)
- return
- }
- push(s, v.Args[0])
- popAndSave(s, v)
-
- case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
- flush387(s) // Calls must empty the FP stack.
- fallthrough // then issue the call as normal
- default:
- ssaGenValue(s, v)
- }
-}
-
-// push pushes v onto the floating-point stack. v must be in a register.
-func push(s *gc.SSAGenState, v *ssa.Value) {
- p := s.Prog(x86.AFMOVD)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = s.SSEto387[v.Reg()]
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_F0
-}
-
-// popAndSave pops a value off of the floating-point stack and stores
-// it in the register assigned to v.
-func popAndSave(s *gc.SSAGenState, v *ssa.Value) {
- r := v.Reg()
- if _, ok := s.SSEto387[r]; ok {
- // Pop value, write to correct register.
- p := s.Prog(x86.AFMOVDP)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = s.SSEto387[v.Reg()] + 1
- } else {
- // Don't actually pop value. This 387 register is now the
- // new home for the not-yet-assigned-a-home SSE register.
- // Increase the register mapping of all other registers by one.
- for rSSE, r387 := range s.SSEto387 {
- s.SSEto387[rSSE] = r387 + 1
- }
- s.SSEto387[r] = x86.REG_F0
- }
-}
-
-// loadPush returns the opcode for load+push of the given type.
-func loadPush(t *types.Type) obj.As {
- if t.Size() == 4 {
- return x86.AFMOVF
- }
- return x86.AFMOVD
-}
-
-// flush387 removes all entries from the 387 floating-point stack.
-func flush387(s *gc.SSAGenState) {
- for k := range s.SSEto387 {
- p := s.Prog(x86.AFMOVDP)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_F0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_F0
- delete(s.SSEto387, k)
- }
-}
-
-func ssaGenBlock387(s *gc.SSAGenState, b, next *ssa.Block) {
- // Empty the 387's FP stack before the block ends.
- flush387(s)
-
- ssaGenBlock(s, b, next)
-}
-
-// installControlWord saves the current floating-point control
-// word and installs a new one loaded from cw.
-// scratchReg must be an unused register.
-// This call must be paired with restoreControlWord.
-// Bytes 4-5 of the scratch space (s.AddrScratch) are used between
-// this call and restoreControlWord.
-func installControlWord(s *gc.SSAGenState, cw *obj.LSym, scratchReg int16) {
- // Save current control word.
- p := s.Prog(x86.AFSTCW)
- s.AddrScratch(&p.To)
- p.To.Offset += 4
-
- // Materialize address of new control word.
- // Note: this must be a separate instruction to handle PIE correctly.
- // See issue 41503.
- p = s.Prog(x86.ALEAL)
- p.From.Type = obj.TYPE_MEM
- p.From.Name = obj.NAME_EXTERN
- p.From.Sym = cw
- p.To.Type = obj.TYPE_REG
- p.To.Reg = scratchReg
-
- // Load replacement control word.
- p = s.Prog(x86.AFLDCW)
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = scratchReg
-}
-func restoreControlWord(s *gc.SSAGenState) {
- p := s.Prog(x86.AFLDCW)
- s.AddrScratch(&p.From)
- p.From.Offset += 4
-}
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
index 56c6989d93..e137daa3fc 100644
--- a/src/cmd/compile/internal/x86/galign.go
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -15,19 +15,21 @@ import (
func Init(arch *gc.Arch) {
arch.LinkArch = &x86.Link386
arch.REGSP = x86.REGSP
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.MAXWIDTH = (1 << 32) - 1
switch v := objabi.GO386; v {
- case "387":
- arch.Use387 = true
- arch.SSAGenValue = ssaGenValue387
- arch.SSAGenBlock = ssaGenBlock387
case "sse2":
- arch.SSAGenValue = ssaGenValue
- arch.SSAGenBlock = ssaGenBlock
+ case "softfloat":
+ arch.SoftFloat = true
+ case "387":
+ fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. Consider using GO386=softfloat instead.\n")
+ gc.Exit(1)
default:
fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
gc.Exit(1)
}
- arch.MAXWIDTH = (1 << 32) - 1
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index c21ac32297..74a4570770 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -852,8 +852,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
}
- case ssa.Op386FCHS:
- v.Fatalf("FCHS in non-387 mode")
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST