Diffstat (limited to 'src/cmd/compile/internal/gc')
-rw-r--r--  src/cmd/compile/internal/gc/alg.go | 152
-rw-r--r--  src/cmd/compile/internal/gc/align.go | 12
-rw-r--r--  src/cmd/compile/internal/gc/bench_test.go | 12
-rw-r--r--  src/cmd/compile/internal/gc/bexport.go | 43
-rw-r--r--  src/cmd/compile/internal/gc/builtin.go | 431
-rw-r--r--  src/cmd/compile/internal/gc/builtin/runtime.go | 1
-rw-r--r--  src/cmd/compile/internal/gc/closure.go | 8
-rw-r--r--  src/cmd/compile/internal/gc/const.go | 71
-rw-r--r--  src/cmd/compile/internal/gc/dcl.go | 2
-rw-r--r--  src/cmd/compile/internal/gc/dwinl.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/embed.go | 273
-rw-r--r--  src/cmd/compile/internal/gc/esc.go | 47
-rw-r--r--  src/cmd/compile/internal/gc/escape.go | 47
-rw-r--r--  src/cmd/compile/internal/gc/export.go | 12
-rw-r--r--  src/cmd/compile/internal/gc/float_test.go | 19
-rw-r--r--  src/cmd/compile/internal/gc/fmt.go | 27
-rw-r--r--  src/cmd/compile/internal/gc/go.go | 21
-rw-r--r--  src/cmd/compile/internal/gc/gsubr.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/iexport.go | 21
-rw-r--r--  src/cmd/compile/internal/gc/iimport.go | 10
-rw-r--r--  src/cmd/compile/internal/gc/inl.go | 273
-rw-r--r--  src/cmd/compile/internal/gc/inl_test.go | 3
-rw-r--r--  src/cmd/compile/internal/gc/lex.go | 7
-rw-r--r--  src/cmd/compile/internal/gc/main.go | 119
-rw-r--r--  src/cmd/compile/internal/gc/noder.go | 157
-rw-r--r--  src/cmd/compile/internal/gc/obj.go | 143
-rw-r--r--  src/cmd/compile/internal/gc/order.go | 13
-rw-r--r--  src/cmd/compile/internal/gc/pgen.go | 18
-rw-r--r--  src/cmd/compile/internal/gc/plive.go | 9
-rw-r--r--  src/cmd/compile/internal/gc/range.go | 17
-rw-r--r--  src/cmd/compile/internal/gc/reflect.go | 3
-rw-r--r--  src/cmd/compile/internal/gc/scope.go | 4
-rw-r--r--  src/cmd/compile/internal/gc/sinit.go | 27
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go | 400
-rw-r--r--  src/cmd/compile/internal/gc/subr.go | 189
-rw-r--r--  src/cmd/compile/internal/gc/swt.go | 29
-rw-r--r--  src/cmd/compile/internal/gc/syntax.go | 98
-rw-r--r--  src/cmd/compile/internal/gc/typecheck.go | 71
-rw-r--r--  src/cmd/compile/internal/gc/universe.go | 10
-rw-r--r--  src/cmd/compile/internal/gc/walk.go | 179
40 files changed, 1848 insertions(+), 1138 deletions(-)
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
index 6302b88f59..2f7fa27bb9 100644
--- a/src/cmd/compile/internal/gc/alg.go
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -282,7 +282,7 @@ func genhash(t *types.Type) *obj.LSym {
}
sym := typesymprefix(".hash", t)
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
@@ -374,7 +374,7 @@ func genhash(t *types.Type) *obj.LSym {
r.List.Append(nh)
fn.Nbody.Append(r)
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
dumplist("genhash body", fn.Nbody)
}
@@ -509,7 +509,7 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
sym := typesymprefix(".eq", t)
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
fmt.Printf("geneq %v\n", t)
}
@@ -529,6 +529,10 @@ func geneq(t *types.Type) *obj.LSym {
fn := dclfunc(sym, tfn)
np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname)
+ nr := asNode(tfn.Type.Results().Field(0).Nname)
+
+ // Label to jump to if an equality test fails.
+ neq := autolabel(".neq")
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
@@ -555,13 +559,13 @@ func geneq(t *types.Type) *obj.LSym {
// for i := 0; i < nelem; i++ {
// if eq(p[i], q[i]) {
// } else {
- // return
+ // goto neq
// }
// }
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
- checkAll := func(unroll int64, eq func(pi, qi *Node) *Node) {
+ checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
// checkIdx generates a node to check for equality at index i.
checkIdx := func(i *Node) *Node {
// pi := p[i]
@@ -576,37 +580,38 @@ func geneq(t *types.Type) *obj.LSym {
}
if nelem <= unroll {
+ if last {
+ // Do last comparison in a different manner.
+ nelem--
+ }
// Generate a series of checks.
- var cond *Node
for i := int64(0); i < nelem; i++ {
- c := nodintconst(i)
- check := checkIdx(c)
- if cond == nil {
- cond = check
- continue
- }
- cond = nod(OANDAND, cond, check)
+ // if check {} else { goto neq }
+ nif := nod(OIF, checkIdx(nodintconst(i)), nil)
+ nif.Rlist.Append(nodSym(OGOTO, nil, neq))
+ fn.Nbody.Append(nif)
+ }
+ if last {
+ fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
+ }
+ } else {
+ // Generate a for loop.
+ // for i := 0; i < nelem; i++
+ i := temp(types.Types[TINT])
+ init := nod(OAS, i, nodintconst(0))
+ cond := nod(OLT, i, nodintconst(nelem))
+ post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
+ loop := nod(OFOR, cond, post)
+ loop.Ninit.Append(init)
+ // if eq(pi, qi) {} else { goto neq }
+ nif := nod(OIF, checkIdx(i), nil)
+ nif.Rlist.Append(nodSym(OGOTO, nil, neq))
+ loop.Nbody.Append(nif)
+ fn.Nbody.Append(loop)
+ if last {
+ fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
}
- nif := nod(OIF, cond, nil)
- nif.Rlist.Append(nod(ORETURN, nil, nil))
- fn.Nbody.Append(nif)
- return
}
-
- // Generate a for loop.
- // for i := 0; i < nelem; i++
- i := temp(types.Types[TINT])
- init := nod(OAS, i, nodintconst(0))
- cond := nod(OLT, i, nodintconst(nelem))
- post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
- loop := nod(OFOR, cond, post)
- loop.Ninit.Append(init)
- // if eq(pi, qi) {} else { return }
- check := checkIdx(i)
- nif := nod(OIF, check, nil)
- nif.Rlist.Append(nod(ORETURN, nil, nil))
- loop.Nbody.Append(nif)
- fn.Nbody.Append(loop)
}
switch t.Elem().Etype {
@@ -614,32 +619,28 @@ func geneq(t *types.Type) *obj.LSym {
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
- checkAll(3, func(pi, qi *Node) *Node {
+ checkAll(3, false, func(pi, qi *Node) *Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
- checkAll(1, func(pi, qi *Node) *Node {
+ checkAll(1, true, func(pi, qi *Node) *Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case TFLOAT32, TFLOAT64:
- checkAll(2, func(pi, qi *Node) *Node {
+ checkAll(2, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
- checkAll(1, func(pi, qi *Node) *Node {
+ checkAll(1, true, func(pi, qi *Node) *Node {
// p[i] == q[i]
return nod(OEQ, pi, qi)
})
}
- // return true
- ret := nod(ORETURN, nil, nil)
- ret.List.Append(nodbool(true))
- fn.Nbody.Append(ret)
case TSTRUCT:
// Build a list of conditions to satisfy.
@@ -717,22 +718,42 @@ func geneq(t *types.Type) *obj.LSym {
flatConds = append(flatConds, c...)
}
- var cond *Node
if len(flatConds) == 0 {
- cond = nodbool(true)
+ fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
} else {
- cond = flatConds[0]
- for _, c := range flatConds[1:] {
- cond = nod(OANDAND, cond, c)
+ for _, c := range flatConds[:len(flatConds)-1] {
+ // if cond {} else { goto neq }
+ n := nod(OIF, c, nil)
+ n.Rlist.Append(nodSym(OGOTO, nil, neq))
+ fn.Nbody.Append(n)
}
+ fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
}
+ }
- ret := nod(ORETURN, nil, nil)
- ret.List.Append(cond)
- fn.Nbody.Append(ret)
+ // ret:
+ // return
+ ret := autolabel(".ret")
+ fn.Nbody.Append(nodSym(OLABEL, nil, ret))
+ fn.Nbody.Append(nod(ORETURN, nil, nil))
+
+ // neq:
+ // r = false
+ // return (or goto ret)
+ fn.Nbody.Append(nodSym(OLABEL, nil, neq))
+ fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
+ if EqCanPanic(t) || hasCall(fn) {
+ // Epilogue is large, so share it with the equal case.
+ fn.Nbody.Append(nodSym(OGOTO, nil, ret))
+ } else {
+ // Epilogue is small, so don't bother sharing.
+ fn.Nbody.Append(nod(ORETURN, nil, nil))
}
+ // TODO(khr): the epilogue size detection condition above isn't perfect.
+ // We should really do a generic CL that shares epilogues across
+ // the board. See #24936.
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
dumplist("geneq body", fn.Nbody)
}
@@ -762,6 +783,39 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
+func hasCall(n *Node) bool {
+ if n.Op == OCALL || n.Op == OCALLFUNC {
+ return true
+ }
+ if n.Left != nil && hasCall(n.Left) {
+ return true
+ }
+ if n.Right != nil && hasCall(n.Right) {
+ return true
+ }
+ for _, x := range n.Ninit.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.Nbody.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.List.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.Rlist.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ return false
+}
+
// eqfield returns the node
// p.field == q.field
func eqfield(p *Node, q *Node, field *types.Sym) *Node {
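
Aside: to make the control flow above concrete, here is a hand-written sketch of the shape geneq now gives a generated ".eq" function, using the shared-epilogue variant and a hypothetical element type (the names eqArray8Int, p, q, r are illustrative, not from the patch). A failed comparison jumps to neq instead of growing one long && chain, and after the loop the named result r is set, matching the "last" handling in checkAll:

    // Illustrative Go equivalent of the IR geneq builds for comparing [8]int.
    func eqArray8Int(p, q *[8]int) (r bool) {
        for i := 0; i < 8; i++ {
            if p[i] == q[i] {
            } else {
                goto neq // an equality test failed
            }
        }
        r = true
        goto ret // shared epilogue, as in the EqCanPanic/hasCall case
    ret:
        return
    neq:
        r = false
        goto ret
    }
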
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
index 5af403afa3..a3a0c8fce8 100644
--- a/src/cmd/compile/internal/gc/align.go
+++ b/src/cmd/compile/internal/gc/align.go
@@ -86,7 +86,7 @@ func expandiface(t *types.Type) {
sort.Sort(methcmp(methods))
if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
- yyerror("interface too large")
+ yyerrorl(typePos(t), "interface too large")
}
for i, m := range methods {
m.Offset = int64(i) * int64(Widthptr)
@@ -150,7 +150,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
maxwidth = 1<<31 - 1
}
if o >= maxwidth {
- yyerror("type %L too large", errtype)
+ yyerrorl(typePos(errtype), "type %L too large", errtype)
o = 8 // small but nonzero
}
}
@@ -199,7 +199,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
}
*path = append(*path, t)
- if findTypeLoop(asNode(t.Nod).Name.Param.Ntype.Type, path) {
+ if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
return true
}
*path = (*path)[:len(*path)-1]
@@ -381,7 +381,7 @@ func dowidth(t *types.Type) {
t1 := t.ChanArgs()
dowidth(t1) // just in case
if t1.Elem().Width >= 1<<16 {
- yyerror("channel element type too large (>64kB)")
+ yyerrorl(typePos(t1), "channel element type too large (>64kB)")
}
w = 1 // anything will do
@@ -414,7 +414,7 @@ func dowidth(t *types.Type) {
if t.Elem().Width != 0 {
cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
if uint64(t.NumElem()) > cap {
- yyerror("type %L larger than address space", t)
+ yyerrorl(typePos(t), "type %L larger than address space", t)
}
}
w = t.NumElem() * t.Elem().Width
@@ -456,7 +456,7 @@ func dowidth(t *types.Type) {
}
if Widthptr == 4 && w != int64(int32(w)) {
- yyerror("type %v too large", t)
+ yyerrorl(typePos(t), "type %v too large", t)
}
t.Width = w
diff --git a/src/cmd/compile/internal/gc/bench_test.go b/src/cmd/compile/internal/gc/bench_test.go
index a2887f2f7b..8c4288128f 100644
--- a/src/cmd/compile/internal/gc/bench_test.go
+++ b/src/cmd/compile/internal/gc/bench_test.go
@@ -7,6 +7,7 @@ package gc
import "testing"
var globl int64
+var globl32 int32
func BenchmarkLoadAdd(b *testing.B) {
x := make([]int64, 1024)
@@ -42,6 +43,17 @@ func BenchmarkModify(b *testing.B) {
}
}
+func BenchmarkMullImm(b *testing.B) {
+ x := make([]int32, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int32
+ for i := range x {
+ s += x[i] * 100
+ }
+ globl32 = s
+ }
+}
+
func BenchmarkConstModify(b *testing.B) {
a := make([]int64, 1024)
for i := 0; i < b.N; i++ {
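
Aside: the new benchmark follows the pattern of the existing ones in this file and can be run on its own with the standard test machinery, e.g.

    go test -run=NONE -bench=MullImm cmd/compile/internal/gc

It times a multiply-by-immediate in a loop, which gives a stable reference point for backend rewrites of constant multiplication (for example, strength-reducing *100 into shift-and-add sequences).
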
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
index 5ced66c0da..10f21f86df 100644
--- a/src/cmd/compile/internal/gc/bexport.go
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -81,11 +81,6 @@ func (p *exporter) markType(t *types.Type) {
}
}
-// deltaNewFile is a magic line delta offset indicating a new file.
-// We use -64 because it is rare; see issue 20080 and CL 41619.
-// -64 is the smallest int that fits in a single byte as a varint.
-const deltaNewFile = -64
-
// ----------------------------------------------------------------------------
// Export format
@@ -126,30 +121,6 @@ const (
aliasTag
)
-// untype returns the "pseudo" untyped type for a Ctype (import/export use only).
-// (we can't use a pre-initialized array because we must be sure all types are
-// set up)
-func untype(ctype Ctype) *types.Type {
- switch ctype {
- case CTINT:
- return types.Idealint
- case CTRUNE:
- return types.Idealrune
- case CTFLT:
- return types.Idealfloat
- case CTCPLX:
- return types.Idealcomplex
- case CTSTR:
- return types.Idealstring
- case CTBOOL:
- return types.Idealbool
- case CTNIL:
- return types.Types[TNIL]
- }
- Fatalf("exporter: unknown Ctype")
- return nil
-}
-
var predecl []*types.Type // initialized lazily
func predeclared() []*types.Type {
@@ -184,13 +155,13 @@ func predeclared() []*types.Type {
types.Errortype,
// untyped types
- untype(CTBOOL),
- untype(CTINT),
- untype(CTRUNE),
- untype(CTFLT),
- untype(CTCPLX),
- untype(CTSTR),
- untype(CTNIL),
+ types.UntypedBool,
+ types.UntypedInt,
+ types.UntypedRune,
+ types.UntypedFloat,
+ types.UntypedComplex,
+ types.UntypedString,
+ types.Types[TNIL],
// package unsafe
types.Types[TUNSAFEPTR],
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index da7b107bfe..fd95b657b2 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -44,6 +44,7 @@ var runtimeDecls = [...]struct {
{"printcomplex", funcTag, 27},
{"printstring", funcTag, 29},
{"printpointer", funcTag, 30},
+ {"printuintptr", funcTag, 31},
{"printiface", funcTag, 30},
{"printeface", funcTag, 30},
{"printslice", funcTag, 30},
@@ -51,134 +52,134 @@ var runtimeDecls = [...]struct {
{"printsp", funcTag, 9},
{"printlock", funcTag, 9},
{"printunlock", funcTag, 9},
- {"concatstring2", funcTag, 33},
- {"concatstring3", funcTag, 34},
- {"concatstring4", funcTag, 35},
- {"concatstring5", funcTag, 36},
- {"concatstrings", funcTag, 38},
- {"cmpstring", funcTag, 39},
- {"intstring", funcTag, 42},
- {"slicebytetostring", funcTag, 43},
- {"slicebytetostringtmp", funcTag, 44},
- {"slicerunetostring", funcTag, 47},
- {"stringtoslicebyte", funcTag, 49},
- {"stringtoslicerune", funcTag, 52},
- {"slicecopy", funcTag, 53},
- {"decoderune", funcTag, 54},
- {"countrunes", funcTag, 55},
- {"convI2I", funcTag, 56},
- {"convT16", funcTag, 57},
- {"convT32", funcTag, 57},
- {"convT64", funcTag, 57},
- {"convTstring", funcTag, 57},
- {"convTslice", funcTag, 57},
- {"convT2E", funcTag, 58},
- {"convT2Enoptr", funcTag, 58},
- {"convT2I", funcTag, 58},
- {"convT2Inoptr", funcTag, 58},
- {"assertE2I", funcTag, 56},
- {"assertE2I2", funcTag, 59},
- {"assertI2I", funcTag, 56},
- {"assertI2I2", funcTag, 59},
- {"panicdottypeE", funcTag, 60},
- {"panicdottypeI", funcTag, 60},
- {"panicnildottype", funcTag, 61},
- {"ifaceeq", funcTag, 63},
- {"efaceeq", funcTag, 63},
- {"fastrand", funcTag, 65},
- {"makemap64", funcTag, 67},
- {"makemap", funcTag, 68},
- {"makemap_small", funcTag, 69},
- {"mapaccess1", funcTag, 70},
- {"mapaccess1_fast32", funcTag, 71},
- {"mapaccess1_fast64", funcTag, 71},
- {"mapaccess1_faststr", funcTag, 71},
- {"mapaccess1_fat", funcTag, 72},
- {"mapaccess2", funcTag, 73},
- {"mapaccess2_fast32", funcTag, 74},
- {"mapaccess2_fast64", funcTag, 74},
- {"mapaccess2_faststr", funcTag, 74},
- {"mapaccess2_fat", funcTag, 75},
- {"mapassign", funcTag, 70},
- {"mapassign_fast32", funcTag, 71},
- {"mapassign_fast32ptr", funcTag, 71},
- {"mapassign_fast64", funcTag, 71},
- {"mapassign_fast64ptr", funcTag, 71},
- {"mapassign_faststr", funcTag, 71},
- {"mapiterinit", funcTag, 76},
- {"mapdelete", funcTag, 76},
- {"mapdelete_fast32", funcTag, 77},
- {"mapdelete_fast64", funcTag, 77},
- {"mapdelete_faststr", funcTag, 77},
- {"mapiternext", funcTag, 78},
- {"mapclear", funcTag, 79},
- {"makechan64", funcTag, 81},
- {"makechan", funcTag, 82},
- {"chanrecv1", funcTag, 84},
- {"chanrecv2", funcTag, 85},
- {"chansend1", funcTag, 87},
+ {"concatstring2", funcTag, 34},
+ {"concatstring3", funcTag, 35},
+ {"concatstring4", funcTag, 36},
+ {"concatstring5", funcTag, 37},
+ {"concatstrings", funcTag, 39},
+ {"cmpstring", funcTag, 40},
+ {"intstring", funcTag, 43},
+ {"slicebytetostring", funcTag, 44},
+ {"slicebytetostringtmp", funcTag, 45},
+ {"slicerunetostring", funcTag, 48},
+ {"stringtoslicebyte", funcTag, 50},
+ {"stringtoslicerune", funcTag, 53},
+ {"slicecopy", funcTag, 54},
+ {"decoderune", funcTag, 55},
+ {"countrunes", funcTag, 56},
+ {"convI2I", funcTag, 57},
+ {"convT16", funcTag, 58},
+ {"convT32", funcTag, 58},
+ {"convT64", funcTag, 58},
+ {"convTstring", funcTag, 58},
+ {"convTslice", funcTag, 58},
+ {"convT2E", funcTag, 59},
+ {"convT2Enoptr", funcTag, 59},
+ {"convT2I", funcTag, 59},
+ {"convT2Inoptr", funcTag, 59},
+ {"assertE2I", funcTag, 57},
+ {"assertE2I2", funcTag, 60},
+ {"assertI2I", funcTag, 57},
+ {"assertI2I2", funcTag, 60},
+ {"panicdottypeE", funcTag, 61},
+ {"panicdottypeI", funcTag, 61},
+ {"panicnildottype", funcTag, 62},
+ {"ifaceeq", funcTag, 64},
+ {"efaceeq", funcTag, 64},
+ {"fastrand", funcTag, 66},
+ {"makemap64", funcTag, 68},
+ {"makemap", funcTag, 69},
+ {"makemap_small", funcTag, 70},
+ {"mapaccess1", funcTag, 71},
+ {"mapaccess1_fast32", funcTag, 72},
+ {"mapaccess1_fast64", funcTag, 72},
+ {"mapaccess1_faststr", funcTag, 72},
+ {"mapaccess1_fat", funcTag, 73},
+ {"mapaccess2", funcTag, 74},
+ {"mapaccess2_fast32", funcTag, 75},
+ {"mapaccess2_fast64", funcTag, 75},
+ {"mapaccess2_faststr", funcTag, 75},
+ {"mapaccess2_fat", funcTag, 76},
+ {"mapassign", funcTag, 71},
+ {"mapassign_fast32", funcTag, 72},
+ {"mapassign_fast32ptr", funcTag, 72},
+ {"mapassign_fast64", funcTag, 72},
+ {"mapassign_fast64ptr", funcTag, 72},
+ {"mapassign_faststr", funcTag, 72},
+ {"mapiterinit", funcTag, 77},
+ {"mapdelete", funcTag, 77},
+ {"mapdelete_fast32", funcTag, 78},
+ {"mapdelete_fast64", funcTag, 78},
+ {"mapdelete_faststr", funcTag, 78},
+ {"mapiternext", funcTag, 79},
+ {"mapclear", funcTag, 80},
+ {"makechan64", funcTag, 82},
+ {"makechan", funcTag, 83},
+ {"chanrecv1", funcTag, 85},
+ {"chanrecv2", funcTag, 86},
+ {"chansend1", funcTag, 88},
{"closechan", funcTag, 30},
- {"writeBarrier", varTag, 89},
- {"typedmemmove", funcTag, 90},
- {"typedmemclr", funcTag, 91},
- {"typedslicecopy", funcTag, 92},
- {"selectnbsend", funcTag, 93},
- {"selectnbrecv", funcTag, 94},
- {"selectnbrecv2", funcTag, 96},
- {"selectsetpc", funcTag, 97},
- {"selectgo", funcTag, 98},
+ {"writeBarrier", varTag, 90},
+ {"typedmemmove", funcTag, 91},
+ {"typedmemclr", funcTag, 92},
+ {"typedslicecopy", funcTag, 93},
+ {"selectnbsend", funcTag, 94},
+ {"selectnbrecv", funcTag, 95},
+ {"selectnbrecv2", funcTag, 97},
+ {"selectsetpc", funcTag, 98},
+ {"selectgo", funcTag, 99},
{"block", funcTag, 9},
- {"makeslice", funcTag, 99},
- {"makeslice64", funcTag, 100},
- {"makeslicecopy", funcTag, 101},
- {"growslice", funcTag, 103},
- {"memmove", funcTag, 104},
- {"memclrNoHeapPointers", funcTag, 105},
- {"memclrHasPointers", funcTag, 105},
- {"memequal", funcTag, 106},
- {"memequal0", funcTag, 107},
- {"memequal8", funcTag, 107},
- {"memequal16", funcTag, 107},
- {"memequal32", funcTag, 107},
- {"memequal64", funcTag, 107},
- {"memequal128", funcTag, 107},
- {"f32equal", funcTag, 108},
- {"f64equal", funcTag, 108},
- {"c64equal", funcTag, 108},
- {"c128equal", funcTag, 108},
- {"strequal", funcTag, 108},
- {"interequal", funcTag, 108},
- {"nilinterequal", funcTag, 108},
- {"memhash", funcTag, 109},
- {"memhash0", funcTag, 110},
- {"memhash8", funcTag, 110},
- {"memhash16", funcTag, 110},
- {"memhash32", funcTag, 110},
- {"memhash64", funcTag, 110},
- {"memhash128", funcTag, 110},
- {"f32hash", funcTag, 110},
- {"f64hash", funcTag, 110},
- {"c64hash", funcTag, 110},
- {"c128hash", funcTag, 110},
- {"strhash", funcTag, 110},
- {"interhash", funcTag, 110},
- {"nilinterhash", funcTag, 110},
- {"int64div", funcTag, 111},
- {"uint64div", funcTag, 112},
- {"int64mod", funcTag, 111},
- {"uint64mod", funcTag, 112},
- {"float64toint64", funcTag, 113},
- {"float64touint64", funcTag, 114},
- {"float64touint32", funcTag, 115},
- {"int64tofloat64", funcTag, 116},
- {"uint64tofloat64", funcTag, 117},
- {"uint32tofloat64", funcTag, 118},
- {"complex128div", funcTag, 119},
- {"racefuncenter", funcTag, 120},
+ {"makeslice", funcTag, 100},
+ {"makeslice64", funcTag, 101},
+ {"makeslicecopy", funcTag, 102},
+ {"growslice", funcTag, 104},
+ {"memmove", funcTag, 105},
+ {"memclrNoHeapPointers", funcTag, 106},
+ {"memclrHasPointers", funcTag, 106},
+ {"memequal", funcTag, 107},
+ {"memequal0", funcTag, 108},
+ {"memequal8", funcTag, 108},
+ {"memequal16", funcTag, 108},
+ {"memequal32", funcTag, 108},
+ {"memequal64", funcTag, 108},
+ {"memequal128", funcTag, 108},
+ {"f32equal", funcTag, 109},
+ {"f64equal", funcTag, 109},
+ {"c64equal", funcTag, 109},
+ {"c128equal", funcTag, 109},
+ {"strequal", funcTag, 109},
+ {"interequal", funcTag, 109},
+ {"nilinterequal", funcTag, 109},
+ {"memhash", funcTag, 110},
+ {"memhash0", funcTag, 111},
+ {"memhash8", funcTag, 111},
+ {"memhash16", funcTag, 111},
+ {"memhash32", funcTag, 111},
+ {"memhash64", funcTag, 111},
+ {"memhash128", funcTag, 111},
+ {"f32hash", funcTag, 111},
+ {"f64hash", funcTag, 111},
+ {"c64hash", funcTag, 111},
+ {"c128hash", funcTag, 111},
+ {"strhash", funcTag, 111},
+ {"interhash", funcTag, 111},
+ {"nilinterhash", funcTag, 111},
+ {"int64div", funcTag, 112},
+ {"uint64div", funcTag, 113},
+ {"int64mod", funcTag, 112},
+ {"uint64mod", funcTag, 113},
+ {"float64toint64", funcTag, 114},
+ {"float64touint64", funcTag, 115},
+ {"float64touint32", funcTag, 116},
+ {"int64tofloat64", funcTag, 117},
+ {"uint64tofloat64", funcTag, 118},
+ {"uint32tofloat64", funcTag, 119},
+ {"complex128div", funcTag, 120},
+ {"racefuncenter", funcTag, 31},
{"racefuncenterfp", funcTag, 9},
{"racefuncexit", funcTag, 9},
- {"raceread", funcTag, 120},
- {"racewrite", funcTag, 120},
+ {"raceread", funcTag, 31},
+ {"racewrite", funcTag, 31},
{"racereadrange", funcTag, 121},
{"racewriterange", funcTag, 121},
{"msanread", funcTag, 121},
@@ -233,96 +234,96 @@ func runtimeTypes() []*types.Type {
typs[28] = types.Types[TSTRING]
typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
- typs[31] = types.NewArray(typs[0], 32)
- typs[32] = types.NewPtr(typs[31])
- typs[33] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[34] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[35] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[36] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[37] = types.NewSlice(typs[28])
- typs[38] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[37])}, []*Node{anonfield(typs[28])})
- typs[39] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
- typs[40] = types.NewArray(typs[0], 4)
- typs[41] = types.NewPtr(typs[40])
- typs[42] = functype(nil, []*Node{anonfield(typs[41]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
- typs[43] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
- typs[44] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
- typs[45] = types.Runetype
- typs[46] = types.NewSlice(typs[45])
- typs[47] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[46])}, []*Node{anonfield(typs[28])})
- typs[48] = types.NewSlice(typs[0])
- typs[49] = functype(nil, []*Node{anonfield(typs[32]), anonfield(typs[28])}, []*Node{anonfield(typs[48])})
- typs[50] = types.NewArray(typs[45], 32)
- typs[51] = types.NewPtr(typs[50])
- typs[52] = functype(nil, []*Node{anonfield(typs[51]), anonfield(typs[28])}, []*Node{anonfield(typs[46])})
- typs[53] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
- typs[54] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[45]), anonfield(typs[15])})
- typs[55] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
- typs[56] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
- typs[57] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
- typs[58] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
- typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
- typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
- typs[61] = functype(nil, []*Node{anonfield(typs[1])}, nil)
- typs[62] = types.NewPtr(typs[5])
- typs[63] = functype(nil, []*Node{anonfield(typs[62]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
- typs[64] = types.Types[TUINT32]
- typs[65] = functype(nil, nil, []*Node{anonfield(typs[64])})
- typs[66] = types.NewMap(typs[2], typs[2])
- typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
- typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
- typs[69] = functype(nil, nil, []*Node{anonfield(typs[66])})
- typs[70] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
- typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
- typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
- typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, nil)
- typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, nil)
- typs[78] = functype(nil, []*Node{anonfield(typs[3])}, nil)
- typs[79] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66])}, nil)
- typs[80] = types.NewChan(typs[2], types.Cboth)
- typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[80])})
- typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[80])})
- typs[83] = types.NewChan(typs[2], types.Crecv)
- typs[84] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, nil)
- typs[85] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[86] = types.NewChan(typs[2], types.Csend)
- typs[87] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, nil)
- typs[88] = types.NewArray(typs[0], 3)
- typs[89] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[88]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
- typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
- typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
- typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
- typs[93] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[94] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
- typs[95] = types.NewPtr(typs[6])
- typs[96] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[95]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
- typs[97] = functype(nil, []*Node{anonfield(typs[62])}, nil)
- typs[98] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[62]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
- typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
- typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
- typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
- typs[102] = types.NewSlice(typs[2])
- typs[103] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[102]), anonfield(typs[15])}, []*Node{anonfield(typs[102])})
- typs[104] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
- typs[105] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
- typs[106] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
- typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[108] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
- typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
- typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
- typs[111] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
- typs[112] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
- typs[113] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
- typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
- typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[64])})
- typs[116] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
- typs[117] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
- typs[118] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[20])})
- typs[119] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
- typs[120] = functype(nil, []*Node{anonfield(typs[5])}, nil)
+ typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
+ typs[32] = types.NewArray(typs[0], 32)
+ typs[33] = types.NewPtr(typs[32])
+ typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[38] = types.NewSlice(typs[28])
+ typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
+ typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
+ typs[41] = types.NewArray(typs[0], 4)
+ typs[42] = types.NewPtr(typs[41])
+ typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
+ typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
+ typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
+ typs[46] = types.Runetype
+ typs[47] = types.NewSlice(typs[46])
+ typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
+ typs[49] = types.NewSlice(typs[0])
+ typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
+ typs[51] = types.NewArray(typs[46], 32)
+ typs[52] = types.NewPtr(typs[51])
+ typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
+ typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
+ typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
+ typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
+ typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
+ typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
+ typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
+ typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
+ typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
+ typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
+ typs[63] = types.NewPtr(typs[5])
+ typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
+ typs[65] = types.Types[TUINT32]
+ typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
+ typs[67] = types.NewMap(typs[2], typs[2])
+ typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
+ typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
+ typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
+ typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
+ typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
+ typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
+ typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
+ typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
+ typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
+ typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
+ typs[81] = types.NewChan(typs[2], types.Cboth)
+ typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
+ typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
+ typs[84] = types.NewChan(typs[2], types.Crecv)
+ typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
+ typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[87] = types.NewChan(typs[2], types.Csend)
+ typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
+ typs[89] = types.NewArray(typs[0], 3)
+ typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
+ typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
+ typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
+ typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
+ typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
+ typs[96] = types.NewPtr(typs[6])
+ typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
+ typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
+ typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
+ typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
+ typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
+ typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
+ typs[103] = types.NewSlice(typs[2])
+ typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
+ typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
+ typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
+ typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
+ typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
+ typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
+ typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
+ typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
+ typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
+ typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
+ typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
+ typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
+ typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
+ typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
+ typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
+ typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[123] = types.NewSlice(typs[7])
@@ -331,7 +332,7 @@ func runtimeTypes() []*types.Type {
typs[126] = functype(nil, []*Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[TUINT16]
typs[128] = functype(nil, []*Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
- typs[129] = functype(nil, []*Node{anonfield(typs[64]), anonfield(typs[64])}, nil)
+ typs[129] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[130] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}
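
Note on the renumbering above: builtin.go is a generated file (its header reads "Code generated by mkbuiltin.go. DO NOT EDIT."), so inserting the printuintptr declaration into builtin/runtime.go shifts every later type index by one, which accounts for the large but mechanical diff. Assuming the usual workflow, the file is refreshed with:

    cd src/cmd/compile/internal/gc
    go run mkbuiltin.go
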
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index 02d6c7b7f5..aac2de38c6 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -54,6 +54,7 @@ func printuint(uint64)
func printcomplex(complex128)
func printstring(string)
func printpointer(any)
+func printuintptr(uintptr)
func printiface(any)
func printeface(any)
func printslice(any)
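
For context, the compiler-side declaration above has to be matched by a function in package runtime. A minimal sketch of that counterpart, assuming it prints the value in hex via runtime's internal printhex helper (the body is an assumption; the runtime change is not part of this diff):

    // Assumed runtime-side counterpart (not shown in this diff).
    func printuintptr(p uintptr) {
        printhex(uint64(p)) // print as hex, like other pointer-valued prints
    }
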
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index 250be38e5b..902d2e34a3 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -198,7 +198,7 @@ func capturevars(xfunc *Node) {
outer = nod(OADDR, outer, nil)
}
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
var name *types.Sym
if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
name = v.Name.Curfn.Func.Nname.Sym
@@ -434,6 +434,8 @@ func typecheckpartialcall(fn *Node, sym *types.Sym) {
fn.Type = xfunc.Type
}
+// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
+// for partial calls.
func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
rcvrtype := fn.Left.Type
sym := methodSymSuffix(rcvrtype, meth, "-fm")
@@ -500,6 +502,10 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
funcbody()
xfunc = typecheck(xfunc, ctxStmt)
+ // Need to typecheck the body of the just-generated wrapper.
+ // typecheckslice() requires that Curfn is set when processing an ORETURN.
+ Curfn = xfunc
+ typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
sym.Def = asTypesNode(xfunc)
xtop = append(xtop, xfunc)
Curfn = savecurfn
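
For illustration, this is the kind of user code that reaches makepartialcall: a method value, lowered through a generated wrapper named with the "-fm" suffix. The names T, Inc, and use are hypothetical:

    type T struct{ n int }

    func (t T) Inc() int { return t.n + 1 }

    func use() func() int {
        var t T
        return t.Inc // method value; compiled via a generated "T.Inc-fm" wrapper
    }

The typecheckslice call added above ensures the wrapper's ORETURN is typechecked while Curfn points at the wrapper rather than at the enclosing function.
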
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index c0ed8192d9..b92c8d66b5 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -114,16 +114,16 @@ func (v Val) Interface() interface{} {
type NilVal struct{}
-// Int64 returns n as an int64.
+// Int64Val returns n as an int64.
// n must be an integer or rune constant.
-func (n *Node) Int64() int64 {
+func (n *Node) Int64Val() int64 {
if !Isconst(n, CTINT) {
- Fatalf("Int64(%v)", n)
+ Fatalf("Int64Val(%v)", n)
}
return n.Val().U.(*Mpint).Int64()
}
-// CanInt64 reports whether it is safe to call Int64() on n.
+// CanInt64 reports whether it is safe to call Int64Val() on n.
func (n *Node) CanInt64() bool {
if !Isconst(n, CTINT) {
return false
@@ -131,18 +131,27 @@ func (n *Node) CanInt64() bool {
// if the value inside n cannot be represented as an int64, the
// return value of Int64 is undefined
- return n.Val().U.(*Mpint).CmpInt64(n.Int64()) == 0
+ return n.Val().U.(*Mpint).CmpInt64(n.Int64Val()) == 0
}
-// Bool returns n as a bool.
+// BoolVal returns n as a bool.
// n must be a boolean constant.
-func (n *Node) Bool() bool {
+func (n *Node) BoolVal() bool {
if !Isconst(n, CTBOOL) {
- Fatalf("Bool(%v)", n)
+ Fatalf("BoolVal(%v)", n)
}
return n.Val().U.(bool)
}
+// StringVal returns the value of a literal string Node as a string.
+// n must be a string constant.
+func (n *Node) StringVal() string {
+ if !Isconst(n, CTSTR) {
+ Fatalf("StringVal(%v)", n)
+ }
+ return n.Val().U.(string)
+}
+
// truncate float literal fv to 32-bit or 64-bit precision
// according to type; return truncated value.
func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt {
@@ -612,7 +621,7 @@ func evconst(n *Node) {
var strs []string
i2 := i1
for i2 < len(s) && Isconst(s[i2], CTSTR) {
- strs = append(strs, strlit(s[i2]))
+ strs = append(strs, s[i2].StringVal())
i2++
}
@@ -635,7 +644,7 @@ func evconst(n *Node) {
switch nl.Type.Etype {
case TSTRING:
if Isconst(nl, CTSTR) {
- setintconst(n, int64(len(strlit(nl))))
+ setintconst(n, int64(len(nl.StringVal())))
}
case TARRAY:
if !hascallchan(nl) {
@@ -1019,17 +1028,17 @@ func nodlit(v Val) *Node {
func idealType(ct Ctype) *types.Type {
switch ct {
case CTSTR:
- return types.Idealstring
+ return types.UntypedString
case CTBOOL:
- return types.Idealbool
+ return types.UntypedBool
case CTINT:
- return types.Idealint
+ return types.UntypedInt
case CTRUNE:
- return types.Idealrune
+ return types.UntypedRune
case CTFLT:
- return types.Idealfloat
+ return types.UntypedFloat
case CTCPLX:
- return types.Idealcomplex
+ return types.UntypedComplex
case CTNIL:
return types.Types[TNIL]
}
@@ -1080,17 +1089,17 @@ func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
func ctype(t *types.Type) Ctype {
switch t {
- case types.Idealbool:
+ case types.UntypedBool:
return CTBOOL
- case types.Idealstring:
+ case types.UntypedString:
return CTSTR
- case types.Idealint:
+ case types.UntypedInt:
return CTINT
- case types.Idealrune:
+ case types.UntypedRune:
return CTRUNE
- case types.Idealfloat:
+ case types.UntypedFloat:
return CTFLT
- case types.Idealcomplex:
+ case types.UntypedComplex:
return CTCPLX
}
Fatalf("bad type %v", t)
@@ -1111,17 +1120,17 @@ func defaultType(t *types.Type) *types.Type {
}
switch t {
- case types.Idealbool:
+ case types.UntypedBool:
return types.Types[TBOOL]
- case types.Idealstring:
+ case types.UntypedString:
return types.Types[TSTRING]
- case types.Idealint:
+ case types.UntypedInt:
return types.Types[TINT]
- case types.Idealrune:
+ case types.UntypedRune:
return types.Runetype
- case types.Idealfloat:
+ case types.UntypedFloat:
return types.Types[TFLOAT64]
- case types.Idealcomplex:
+ case types.UntypedComplex:
return types.Types[TCOMPLEX128]
}
@@ -1129,12 +1138,6 @@ func defaultType(t *types.Type) *types.Type {
return nil
}
-// strlit returns the value of a literal string Node as a string.
-func strlit(n *Node) string {
- return n.Val().U.(string)
-}
-
-// TODO(gri) smallintconst is only used in one place - can we used indexconst?
func smallintconst(n *Node) bool {
if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
switch simtype[n.Type.Etype] {
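
For illustration, the renamed accessors keep their old contracts, so call sites inside the package still guard them the same way. A sketch using a hypothetical n *Node:

    if Isconst(n, CTSTR) {
        s := n.StringVal() // replaces the deleted free function strlit(n)
        _ = s
    }
    if n.CanInt64() {
        v := n.Int64Val() // only safe when the constant fits in an int64
        _ = v
    }
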
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index a362d1a643..b8ca0d2e03 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -283,7 +283,7 @@ func oldname(s *types.Sym) *Node {
c.Name.Defn = n
// Link into list of active closure variables.
- // Popped from list in func closurebody.
+ // Popped from list in func funcLit.
c.Name.Param.Outer = n.Name.Param.Innermost
n.Name.Param.Innermost = c
diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go
index 27e2cbcd98..5120fa1166 100644
--- a/src/cmd/compile/internal/gc/dwinl.go
+++ b/src/cmd/compile/internal/gc/dwinl.go
@@ -34,7 +34,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// Walk progs to build up the InlCalls data structure
var prevpos src.XPos
- for p := fnsym.Func.Text; p != nil; p = p.Link {
+ for p := fnsym.Func().Text; p != nil; p = p.Link {
if p.Pos == prevpos {
continue
}
@@ -150,7 +150,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
start := int64(-1)
curii := -1
var prevp *obj.Prog
- for p := fnsym.Func.Text; p != nil; prevp, p = p, p.Link {
+ for p := fnsym.Func().Text; p != nil; prevp, p = p, p.Link {
if prevp != nil && p.Pos == prevp.Pos {
continue
}
diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go
new file mode 100644
index 0000000000..103949c1f9
--- /dev/null
+++ b/src/cmd/compile/internal/gc/embed.go
@@ -0,0 +1,273 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "encoding/json"
+ "io/ioutil"
+ "log"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var embedlist []*Node
+
+var embedCfg struct {
+ Patterns map[string][]string
+ Files map[string]string
+}
+
+func readEmbedCfg(file string) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-embedcfg: %v", err)
+ }
+ if err := json.Unmarshal(data, &embedCfg); err != nil {
+ log.Fatalf("%s: %v", file, err)
+ }
+ if embedCfg.Patterns == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+ }
+ if embedCfg.Files == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Files", file)
+ }
+}
+
+const (
+ embedUnknown = iota
+ embedBytes
+ embedString
+ embedFiles
+)
+
+var numLocalEmbed int
+
+func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) (newExprs []*Node) {
+ haveEmbed := false
+ for _, decl := range p.file.DeclList {
+ imp, ok := decl.(*syntax.ImportDecl)
+ if !ok {
+ // imports always come first
+ break
+ }
+ path, _ := strconv.Unquote(imp.Path.Value)
+ if path == "embed" {
+ haveEmbed = true
+ break
+ }
+ }
+
+ pos := embeds[0].Pos
+ if !haveEmbed {
+ p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
+ return exprs
+ }
+ if embedCfg.Patterns == nil {
+ p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration")
+ return exprs
+ }
+ if len(names) > 1 {
+ p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
+ return exprs
+ }
+ if len(exprs) > 0 {
+ p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
+ return exprs
+ }
+ if typ == nil {
+ // Should not happen, since len(exprs) == 0 now.
+ p.yyerrorpos(pos, "go:embed cannot apply to var without type")
+ return exprs
+ }
+
+ kind := embedKindApprox(typ)
+ if kind == embedUnknown {
+ p.yyerrorpos(pos, "go:embed cannot apply to var of type %v", typ)
+ return exprs
+ }
+
+ // Build list of files to store.
+ have := make(map[string]bool)
+ var list []string
+ for _, e := range embeds {
+ for _, pattern := range e.Patterns {
+ files, ok := embedCfg.Patterns[pattern]
+ if !ok {
+ p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
+ }
+ for _, file := range files {
+ if embedCfg.Files[file] == "" {
+ p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file)
+ continue
+ }
+ if !have[file] {
+ have[file] = true
+ list = append(list, file)
+ }
+ if kind == embedFiles {
+ for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
+ have[dir] = true
+ list = append(list, dir+"/")
+ }
+ }
+ }
+ }
+ }
+ sort.Slice(list, func(i, j int) bool {
+ return embedFileLess(list[i], list[j])
+ })
+
+ if kind == embedString || kind == embedBytes {
+ if len(list) > 1 {
+ p.yyerrorpos(pos, "invalid go:embed: multiple files for type %v", typ)
+ return exprs
+ }
+ }
+
+ v := names[0]
+ if dclcontext != PEXTERN {
+ numLocalEmbed++
+ v = newnamel(v.Pos, lookupN("embed.", numLocalEmbed))
+ v.Sym.Def = asTypesNode(v)
+ v.Name.Param.Ntype = typ
+ v.SetClass(PEXTERN)
+ externdcl = append(externdcl, v)
+ exprs = []*Node{v}
+ }
+
+ v.Name.Param.SetEmbedFiles(list)
+ embedlist = append(embedlist, v)
+ return exprs
+}
+
+// embedKindApprox determines the kind of embedding variable, approximately.
+// The match is approximate because we haven't done scope resolution yet and
+// can't tell whether "string" and "byte" really mean "string" and "byte".
+// The result must be confirmed later, after type checking, using embedKind.
+func embedKindApprox(typ *Node) int {
+ if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+ return embedFiles
+ }
+ // These are not guaranteed to match only string and []byte -
+ // maybe the local package has redefined one of those words.
+ // But it's the best we can do now during the noder.
+ // The stricter check happens later, in initEmbed calling embedKind.
+ if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == localpkg {
+ return embedString
+ }
+ if typ.Op == OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == localpkg {
+ return embedBytes
+ }
+ return embedUnknown
+}
+
+// embedKind determines the kind of embedding variable.
+func embedKind(typ *types.Type) int {
+ if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+ return embedFiles
+ }
+ if typ == types.Types[TSTRING] {
+ return embedString
+ }
+ if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype {
+ return embedBytes
+ }
+ return embedUnknown
+}
+
+func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
+ if name[len(name)-1] == '/' {
+ isDir = true
+ name = name[:len(name)-1]
+ }
+ i := len(name) - 1
+ for i >= 0 && name[i] != '/' {
+ i--
+ }
+ if i < 0 {
+ return ".", name, isDir
+ }
+ return name[:i], name[i+1:], isDir
+}
+
+// embedFileLess implements the sort order for a list of embedded files.
+// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
+func embedFileLess(x, y string) bool {
+ xdir, xelem, _ := embedFileNameSplit(x)
+ ydir, yelem, _ := embedFileNameSplit(y)
+ return xdir < ydir || xdir == ydir && xelem < yelem
+}
+
+func dumpembeds() {
+ for _, v := range embedlist {
+ initEmbed(v)
+ }
+}
+
+// initEmbed emits the init data for a //go:embed variable,
+// which is either a string, a []byte, or an embed.FS.
+func initEmbed(v *Node) {
+ files := v.Name.Param.EmbedFiles()
+ switch kind := embedKind(v.Type); kind {
+ case embedUnknown:
+ yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
+
+ case embedString, embedBytes:
+ file := files[0]
+ fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
+ if err != nil {
+ yyerrorl(v.Pos, "embed %s: %v", file, err)
+ }
+ sym := v.Sym.Linksym()
+ off := 0
+ off = dsymptr(sym, off, fsym, 0) // data string
+ off = duintptr(sym, off, uint64(size)) // len
+ if kind == embedBytes {
+ duintptr(sym, off, uint64(size)) // cap for slice
+ }
+
+ case embedFiles:
+ slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
+ off := 0
+ // []files pointed at by Files
+ off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
+ off = duintptr(slicedata, off, uint64(len(files)))
+ off = duintptr(slicedata, off, uint64(len(files)))
+
+ // embed/embed.go type file is:
+ // name string
+ // data string
+ // hash [16]byte
+ // Emit one of these per file in the set.
+ const hashSize = 16
+ hash := make([]byte, hashSize)
+ for _, file := range files {
+ off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
+ off = duintptr(slicedata, off, uint64(len(file)))
+ if strings.HasSuffix(file, "/") {
+ // entry for directory - no data
+ off = duintptr(slicedata, off, 0)
+ off = duintptr(slicedata, off, 0)
+ off += hashSize
+ } else {
+ fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
+ if err != nil {
+ yyerrorl(v.Pos, "embed %s: %v", file, err)
+ }
+ off = dsymptr(slicedata, off, fsym, 0) // data string
+ off = duintptr(slicedata, off, uint64(size))
+ off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
+ }
+ }
+ ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+ sym := v.Sym.Linksym()
+ dsymptr(sym, 0, slicedata, 0)
+ }
+}
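
For context, the three kinds embedKind distinguishes correspond to the three declaration forms //go:embed accepts in user code. A minimal sketch (the file names are hypothetical):

    package p

    import "embed"

    //go:embed version.txt
    var version string // embedString: exactly one file

    //go:embed logo.png
    var logo []byte // embedBytes: exactly one file

    //go:embed assets/*
    var assets embed.FS // embedFiles: any number of files and directories

The single-file restriction on the first two forms is what the "multiple files for type" error above enforces, and initEmbed lays each variable out accordingly: a string header, a slice header, or the sorted file table backing an embed.FS.
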
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index 375331d1f5..6f328ab5ea 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -169,36 +169,47 @@ func mayAffectMemory(n *Node) bool {
}
}
-func mustHeapAlloc(n *Node) bool {
+// heapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it doesn't.
+func heapAllocReason(n *Node) string {
if n.Type == nil {
- return false
+ return ""
}
// Parameters are always passed via the stack.
if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
- return false
+ return ""
}
if n.Type.Width > maxStackVarSize {
- return true
+ return "too large for stack"
}
if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
- return true
+ return "too large for stack"
}
if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
- return true
+ return "too large for stack"
}
if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
- return true
+ return "too large for stack"
}
- if n.Op == OMAKESLICE && !isSmallMakeSlice(n) {
- return true
+ if n.Op == OMAKESLICE {
+ r := n.Right
+ if r == nil {
+ r = n.Left
+ }
+ if !smallintconst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
+ return "too large for stack"
+ }
}
- return false
+ return ""
}
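
The rewritten OMAKESLICE case folds the old isSmallMakeSlice test inline: the capacity (or the length, if no capacity is given) must be a small constant, and the total size must stay under maxImplicitStackVarSize. A sketch of the possible outcomes, assuming the default 64 KB limit:

    func f(n int) {
        a := make([]byte, 64)     // small constant size: may stay on the stack
        b := make([]byte, n)      // heapAllocReason: "non-constant size"
        c := make([]int64, 1<<20) // heapAllocReason: "too large for stack"
        _, _, _ = a, b, c
    }
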
// addrescapes tags node n as having had its address taken
@@ -271,7 +282,7 @@ func addrescapes(n *Node) {
// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
- if Debug['r'] != 0 {
+ if Debug.r != 0 {
Dump("MOVE", n)
}
if compiling_runtime {
@@ -348,7 +359,7 @@ func moveToHeap(n *Node) {
n.Xoffset = 0
n.Name.Param.Heapaddr = heapaddr
n.Esc = EscHeap
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(n.Pos, "moved to heap: %v", n)
}
}
@@ -378,7 +389,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
if f.Type.IsUintptr() {
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
}
return unsafeUintptrTag
@@ -393,11 +404,11 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
if fn.Func.Pragma&Noescape != 0 {
- if Debug['m'] != 0 && f.Sym != nil {
+ if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "%v does not escape", name())
}
} else {
- if Debug['m'] != 0 && f.Sym != nil {
+ if Debug.m != 0 && f.Sym != nil {
Warnl(f.Pos, "leaking param: %v", name())
}
esc.AddHeap(0)
@@ -408,14 +419,14 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
if fn.Func.Pragma&UintptrEscapes != 0 {
if f.Type.IsUintptr() {
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping uintptr", name())
}
return uintptrEscapesTag
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
}
return uintptrEscapesTag
@@ -437,7 +448,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
esc := loc.paramEsc
esc.Optimize()
- if Debug['m'] != 0 && !loc.escapes {
+ if Debug.m != 0 && !loc.escapes {
if esc.Empty() {
Warnl(f.Pos, "%v does not escape", name())
}
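
As a concrete example of the pragma handling above: a body-less (assembly-backed) declaration is assumed to leak its pointer parameters unless it carries //go:noescape. The function name here is hypothetical:

    package p

    import "unsafe"

    // Without the pragma, paramTag would report "leaking param: p" and
    // callers would have to heap-allocate anything whose address reaches p.
    // With it, p is tagged as not escaping.
    //
    //go:noescape
    func memclr(p unsafe.Pointer, n uintptr)
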
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index d79d32ec48..618bdf78e2 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -170,7 +170,7 @@ func (e *Escape) initFunc(fn *Node) {
Fatalf("unexpected node: %v", fn)
}
fn.Esc = EscFuncPlanned
- if Debug['m'] > 3 {
+ if Debug.m > 3 {
Dump("escAnalyze", fn)
}
@@ -247,7 +247,7 @@ func (e *Escape) stmt(n *Node) {
lineno = lno
}()
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
}
@@ -275,11 +275,11 @@ func (e *Escape) stmt(n *Node) {
case OLABEL:
switch asNode(n.Sym.Label) {
case &nonlooping:
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
}
case &looping:
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
}
e.loopDepth++
@@ -717,7 +717,7 @@ func (e *Escape) addrs(l Nodes) []EscHole {
func (e *Escape) assign(dst, src *Node, why string, where *Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
- if ignore && Debug['m'] != 0 {
+ if ignore && Debug.m != 0 {
Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
}
@@ -771,10 +771,11 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
var fn *Node
switch call.Op {
case OCALLFUNC:
- if call.Left.Op == ONAME && call.Left.Class() == PFUNC {
- fn = call.Left
- } else if call.Left.Op == OCLOSURE {
- fn = call.Left.Func.Closure.Func.Nname
+ switch v := staticValue(call.Left); {
+ case v.Op == ONAME && v.Class() == PFUNC:
+ fn = v
+ case v.Op == OCLOSURE:
+ fn = v.Func.Closure.Func.Nname
}
case OCALLMETH:
fn = asNode(call.Left.Type.FuncType().Nname)
@@ -930,7 +931,7 @@ func (k EscHole) note(where *Node, why string) EscHole {
if where == nil || why == "" {
Fatalf("note: missing where/why")
}
- if Debug['m'] >= 2 || logopt.Enabled() {
+ if Debug.m >= 2 || logopt.Enabled() {
k.notes = &EscNote{
next: k.notes,
where: where,
@@ -1051,11 +1052,7 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
}
n.SetOpt(loc)
- if mustHeapAlloc(n) {
- why := "too large for stack"
- if n.Op == OMAKESLICE && (!Isconst(n.Left, CTINT) || (n.Right != nil && !Isconst(n.Right, CTINT))) {
- why = "non-constant size"
- }
+ if why := heapAllocReason(n); why != "" {
e.flow(e.heapHole().addr(n, why), loc)
}
}
@@ -1080,9 +1077,9 @@ func (e *Escape) flow(k EscHole, src *EscLocation) {
return
}
if dst.escapes && k.derefs < 0 { // dst = &src
- if Debug['m'] >= 2 || logopt.Enabled() {
+ if Debug.m >= 2 || logopt.Enabled() {
pos := linestr(src.n.Pos)
- if Debug['m'] >= 2 {
+ if Debug.m >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
@@ -1182,8 +1179,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// that value flow for tagging the function
// later.
if l.isName(PPARAM) {
- if (logopt.Enabled() || Debug['m'] >= 2) && !l.escapes {
- if Debug['m'] >= 2 {
+ if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
+ if Debug.m >= 2 {
fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
}
explanation := e.explainPath(root, l)
@@ -1199,8 +1196,8 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// outlives it, then l needs to be heap
// allocated.
if addressOf && !l.escapes {
- if logopt.Enabled() || Debug['m'] >= 2 {
- if Debug['m'] >= 2 {
+ if logopt.Enabled() || Debug.m >= 2 {
+ if Debug.m >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
}
explanation := e.explainPath(root, l)
@@ -1238,7 +1235,7 @@ func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
for {
// Prevent infinite loop.
if visited[src] {
- if Debug['m'] >= 2 {
+ if Debug.m >= 2 {
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
}
break
@@ -1266,7 +1263,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
}
- print := Debug['m'] >= 2
+ print := Debug.m >= 2
flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
if print {
@@ -1420,7 +1417,7 @@ func (e *Escape) finish(fns []*Node) {
if loc.escapes {
if n.Op != ONAME {
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
Warnl(n.Pos, "%S escapes to heap", n)
}
if logopt.Enabled() {
@@ -1430,7 +1427,7 @@ func (e *Escape) finish(fns []*Node) {
n.Esc = EscHeap
addrescapes(n)
} else {
- if Debug['m'] != 0 && n.Op != ONAME {
+ if Debug.m != 0 && n.Op != ONAME {
Warnl(n.Pos, "%S does not escape", n)
}
n.Esc = EscNone
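
All of the Warnl calls above surface through the -m flag; each extra -m raises Debug.m and adds detail (Debug.m >= 2 includes the flow explanations built by explainFlow). A sketch of what gets reported when built with go build -gcflags='-m', with illustrative wording:

    package p

    func grow(n int) []byte {
        s := make([]byte, n) // reported: "make([]byte, n) escapes to heap"
        return s
    }

    func sum(xs *[4]int) int { // reported: "xs does not escape"
        t := 0
        for _, x := range xs {
            t += x
        }
        return t
    }
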
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 44bea2b1fd..c6917e0f81 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -31,7 +31,7 @@ func exportsym(n *Node) {
}
n.Sym.SetOnExportList(true)
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("export symbol %v\n", n.Sym)
}
@@ -96,7 +96,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
return n
}
-// pkgtype returns the named type declared by symbol s.
+// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
@@ -150,7 +150,7 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val
n.SetVal(val)
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("import const %v %L = %v\n", s, t, val)
}
}
@@ -166,7 +166,7 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
n.Func = new(Func)
t.SetNname(asTypesNode(n))
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
}
@@ -179,7 +179,7 @@ func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
return
}
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("import var %v %L\n", s, t)
}
}
@@ -192,7 +192,7 @@ func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
return
}
- if Debug['E'] != 0 {
+ if Debug.E != 0 {
fmt.Printf("import type %v = %L\n", s, t)
}
}
diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
index 6ae363be22..c619d25705 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -6,17 +6,9 @@ package gc
import (
"math"
- "os"
- "runtime"
"testing"
)
-// For GO386=387, make sure fucomi* opcodes are not used
-// for comparison operations.
-// Note that this test will fail only on a Pentium MMX
-// processor (with GOARCH=386 GO386=387), as it just runs
-// some code and looks for an unimplemented instruction fault.
-
//go:noinline
func compare1(a, b float64) bool {
return a < b
@@ -137,9 +129,6 @@ func TestFloatCompareFolded(t *testing.T) {
}
}
-// For GO386=387, make sure fucomi* opcodes are not used
-// for float->int conversions.
-
//go:noinline
func cvt1(a float64) uint64 {
return uint64(a)
@@ -370,14 +359,6 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) {
// are not converted to quiet NaN (qNaN) values during compilation.
// See issue #27193 for more information.
- // TODO: this method for detecting 387 won't work if the compiler has been
- // built using GOARCH=386 GO386=387 and either the target is a different
- // architecture or the GO386=387 environment variable is not set when the
- // test is run.
- if runtime.GOARCH == "386" && os.Getenv("GO386") == "387" {
- t.Skip("signaling NaNs are not propagated on 387 (issue #27516)")
- }
-
// signaling NaNs
{
const nan = uint32(0x7f800001) // sNaN
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index d4af451506..d7ed1d2ff0 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -773,17 +773,17 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
var name string
switch t {
- case types.Idealbool:
+ case types.UntypedBool:
name = "untyped bool"
- case types.Idealstring:
+ case types.UntypedString:
name = "untyped string"
- case types.Idealint:
+ case types.UntypedInt:
name = "untyped int"
- case types.Idealrune:
+ case types.UntypedRune:
name = "untyped rune"
- case types.Idealfloat:
+ case types.UntypedFloat:
name = "untyped float"
- case types.Idealcomplex:
+ case types.UntypedComplex:
name = "untyped complex"
default:
name = basicnames[t.Etype]
@@ -792,6 +792,13 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
return
}
+ if mode == FDbg {
+ b.WriteString(t.Etype.String())
+ b.WriteByte('-')
+ tconv2(b, t, flag, FErr, visited)
+ return
+ }
+
// At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
// try to print it recursively.
// We record the offset in the result buffer where the type's text starts. This offset serves as a reference
@@ -805,12 +812,6 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
visited[t] = b.Len()
defer delete(visited, t)
- if mode == FDbg {
- b.WriteString(t.Etype.String())
- b.WriteByte('-')
- tconv2(b, t, flag, FErr, visited)
- return
- }
switch t.Etype {
case TPTR:
b.WriteByte('*')
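
For reference, the visited map that the relocated FDbg branch now bypasses exists because types can refer to themselves; without it, tconv2 would recurse forever on a type like this sketch:

    // Printing this type revisits *List; tconv2 records the buffer offset
    // of each type in progress and emits a back-reference on revisit
    // instead of descending again.
    type List struct {
        Val  int
        Next *List
    }
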
@@ -1333,7 +1334,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
n.Orig.exprfmt(s, prec, mode)
return
}
- if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != types.Idealbool && n.Type != types.Idealstring {
+ if n.Type != nil && !n.Type.IsUntyped() {
// Need parens when type begins with what might
// be misinterpreted as a unary operator: * or <-.
if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) {
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index 9079ce2afc..da6b6d6e72 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -61,12 +61,12 @@ type Class uint8
//go:generate stringer -type=Class
const (
Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
- PEXTERN // global variable
+ PEXTERN // global variables
PAUTO // local variables
- PAUTOHEAP // local variable or parameter moved to heap
+ PAUTOHEAP // local variables or parameters moved to heap
PPARAM // input arguments
PPARAMOUT // output results
- PFUNC // global function
+ PFUNC // global functions
// Careful: Class is stored in three bits in Node.flags.
_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
@@ -116,7 +116,15 @@ var decldepth int32
var nolocalimports bool
-var Debug [256]int
+// gc debug flags
+type DebugFlags struct {
+ P, B, C, E,
+ K, L, N, S,
+ W, e, h, j,
+ l, m, r, w int
+}
+
+var Debug DebugFlags
var debugstr string
@@ -259,7 +267,6 @@ type Arch struct {
REGSP int
MAXWIDTH int64
- Use387 bool // should 386 backend use 387 FP instructions instead of sse2.
SoftFloat bool
PadFrame func(int64) int64
@@ -328,10 +335,6 @@ var (
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
- // GO386=387
- ControlWord64trunc,
- ControlWord32 *obj.LSym
-
// Wasm
WasmMove,
WasmZero,
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index 480d411f49..ce5182f203 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -153,7 +153,7 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog {
pp.clearp(pp.next)
p.Link = pp.next
- if !pp.pos.IsKnown() && Debug['K'] != 0 {
+ if !pp.pos.IsKnown() && Debug.K != 0 {
Warn("prog: unknown position (line 0)")
}
@@ -199,7 +199,7 @@ func (pp *Progs) settext(fn *Node) {
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
- fn.Func.lsym.Func.Text = ptxt
+ fn.Func.lsym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
ptxt.From.Sym = fn.Func.lsym
diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go
index b3f50b63af..9bc1f64600 100644
--- a/src/cmd/compile/internal/gc/iexport.go
+++ b/src/cmd/compile/internal/gc/iexport.go
@@ -751,11 +751,11 @@ func (w *exportWriter) param(f *types.Field) {
func constTypeOf(typ *types.Type) Ctype {
switch typ {
- case types.Idealint, types.Idealrune:
+ case types.UntypedInt, types.UntypedRune:
return CTINT
- case types.Idealfloat:
+ case types.UntypedFloat:
return CTFLT
- case types.Idealcomplex:
+ case types.UntypedComplex:
return CTCPLX
}
@@ -780,8 +780,8 @@ func constTypeOf(typ *types.Type) Ctype {
}
func (w *exportWriter) value(typ *types.Type, v Val) {
- if typ.IsUntyped() {
- typ = untype(v.Ctype())
+ if vt := idealType(v.Ctype()); typ.IsUntyped() && typ != vt {
+ Fatalf("exporter: untyped type mismatch, have: %v, want: %v", typ, vt)
}
w.typ(typ)
@@ -1017,6 +1017,8 @@ func (w *exportWriter) symIdx(s *types.Sym) {
}
func (w *exportWriter) typeExt(t *types.Type) {
+ // Export whether this type is marked notinheap.
+ w.bool(t.NotInHeap())
// For type T, export the index of type descriptor symbols of T and *T.
if i, ok := typeSymIdx[t]; ok {
w.int64(i[0])
@@ -1264,8 +1266,13 @@ func (w *exportWriter) expr(n *Node) {
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- // case OCALLPART:
- // unimplemented - handled by default case
+ case OCALLPART:
+ // An OCALLPART is an OXDOT before type checking.
+ w.op(OXDOT)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ // Right node should be ONAME
+ w.selector(n.Right.Sym)
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
w.op(OXDOT)
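
The new OCALLPART case covers method values, which previously fell through to the default (unimplemented) path. A hedged sketch of the kind of expression this makes exportable in inline bodies:

    package p

    import "strings"

    var b strings.Builder

    func writer() func(string) (int, error) {
        return b.WriteString // a method value: OCALLPART after typechecking;
        // the exporter writes it back as the selector b.WriteString (OXDOT)
        // and the importer re-typechecks it into a method value.
    }
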
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go
index 4169222c14..7f2b05f288 100644
--- a/src/cmd/compile/internal/gc/iimport.go
+++ b/src/cmd/compile/internal/gc/iimport.go
@@ -375,7 +375,7 @@ func (p *importReader) value() (typ *types.Type, v Val) {
v.U = p.string()
case CTINT:
x := new(Mpint)
- x.Rune = typ == types.Idealrune
+ x.Rune = typ == types.UntypedRune
p.mpint(&x.Val, typ)
v.U = x
case CTFLT:
@@ -596,7 +596,6 @@ func (r *importReader) typ1() *types.Type {
// Ensure we expand the interface in the frontend (#25055).
checkwidth(t)
-
return t
}
}
@@ -711,6 +710,7 @@ func (r *importReader) symIdx(s *types.Sym) {
}
func (r *importReader) typeExt(t *types.Type) {
+ t.SetNotInHeap(r.bool())
i, pi := r.int64(), r.int64()
if i != -1 && pi != -1 {
typeSymIdx[t] = [2]int64{i, pi}
@@ -742,8 +742,8 @@ func (r *importReader) doInline(n *Node) {
importlist = append(importlist, n)
- if Debug['E'] > 0 && Debug['m'] > 2 {
- if Debug['m'] > 3 {
+ if Debug.E > 0 && Debug.m > 2 {
+ if Debug.m > 3 {
fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body))
} else {
fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body))
@@ -866,7 +866,7 @@ func (r *importReader) node() *Node {
// unreachable - handled in case OSTRUCTLIT by elemList
// case OCALLPART:
- // unimplemented
+ // unreachable - mapped to case OXDOT below by exporter
// case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
// unreachable - mapped to case OXDOT below by exporter
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index fa5b3ec698..a2fb00e132 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -7,7 +7,7 @@
// saves a copy of the body. Then inlcalls walks each function body to
// expand calls to inlinable functions.
//
-// The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1,
+// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
// making 1 the default and -l disable. Additional levels (beyond -l) may be buggy and
// are not supported.
// 0: disabled
@@ -21,7 +21,7 @@
// The -d typcheckinl flag enables early typechecking of all imported bodies,
// which is useful to flush out bugs.
//
-// The debug['m'] flag enables diagnostic output. a single -m is useful for verifying
+// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
// which calls get inlined or not, more is for debugging, and may go away at any point.
package gc
@@ -85,7 +85,7 @@ func typecheckinl(fn *Node) {
return // typecheckinl on local function
}
- if Debug['m'] > 2 || Debug_export != 0 {
+ if Debug.m > 2 || Debug_export != 0 {
fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
}
@@ -116,10 +116,10 @@ func caninl(fn *Node) {
}
var reason string // reason, if any, that the function was not inlined
- if Debug['m'] > 1 || logopt.Enabled() {
+ if Debug.m > 1 || logopt.Enabled() {
defer func() {
if reason != "" {
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
}
if logopt.Enabled() {
@@ -187,7 +187,7 @@ func caninl(fn *Node) {
defer n.Func.SetInlinabilityChecked(true)
cc := int32(inlineExtraCallCost)
- if Debug['l'] == 4 {
+ if Debug.l == 4 {
cc = 1 // this appears to yield better performance than 0.
}
@@ -224,9 +224,9 @@ func caninl(fn *Node) {
// this is so export can find the body of a method
fn.Type.FuncType().Nname = asTypesNode(n)
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body))
- } else if Debug['m'] != 0 {
+ } else if Debug.m != 0 {
fmt.Printf("%v: can inline %v\n", fn.Line(), n)
}
if logopt.Enabled() {
@@ -325,18 +325,10 @@ func (v *hairyVisitor) visit(n *Node) bool {
break
}
- if fn := n.Left.Func; fn != nil && fn.Inl != nil {
- v.budget -= fn.Inl.Cost
+ if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
+ v.budget -= fn.Func.Inl.Cost
break
}
- if n.Left.isMethodExpression() {
- if d := asNode(n.Left.Sym.Def); d != nil && d.Func.Inl != nil {
- v.budget -= d.Func.Inl.Cost
- break
- }
- }
- // TODO(mdempsky): Budget for OCLOSURE calls if we
- // ever allow that. See #15561 and #23093.
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
@@ -382,17 +374,16 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.reason = "call to recover"
return true
+ case OCALLPART:
+ // OCALLPART is inlineable, but adds no extra cost to the budget.
+
case OCLOSURE,
- OCALLPART,
ORANGE,
- OFOR,
- OFORUNTIL,
OSELECT,
OTYPESW,
OGO,
ODEFER,
ODCLTYPE, // can't print yet
- OBREAK,
ORETJMP:
v.reason = "unhandled op " + n.Op.String()
return true
@@ -400,10 +391,23 @@ func (v *hairyVisitor) visit(n *Node) bool {
case OAPPEND:
v.budget -= inlineExtraAppendCost
- case ODCLCONST, OEMPTY, OFALL, OLABEL:
+ case ODCLCONST, OEMPTY, OFALL:
// These nodes don't produce code; omit from inlining budget.
return false
+ case OLABEL:
+ // TODO(mdempsky): Add support for inlining labeled control statements.
+ if n.labeledControl() != nil {
+ v.reason = "labeled control"
+ return true
+ }
+
+ case OBREAK, OCONTINUE:
+ if n.Sym != nil {
+ // Should have short-circuited due to labeledControl above.
+ Fatalf("unexpected labeled break/continue: %v", n)
+ }
+
case OIF:
if Isconst(n.Left, CTBOOL) {
// This if and the condition cost nothing.
@@ -421,7 +425,7 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.budget--
// When debugging, don't stop early, to get full cost of inlining this function
- if v.budget < 0 && Debug['m'] < 2 && !logopt.Enabled() {
+ if v.budget < 0 && Debug.m < 2 && !logopt.Enabled() {
return true
}
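
With the hunk above, OFOR/OFORUNTIL and unlabeled break/continue no longer disqualify a function; only labeled control statements still do (ORANGE also remains on the unhandled list). Ignoring the cost budget, a sketch of the boundary:

    func firstZero(xs []int) int { // can be inlined: plain for and unlabeled break are handled
        i := 0
        for i < len(xs) {
            if xs[i] == 0 {
                break
            }
            i++
        }
        return i
    }

    func hasZero(m [][]int) bool { // cannot: visit reports "labeled control"
        found := false
    Outer:
        for i := 0; i < len(m); i++ {
            for j := 0; j < len(m[i]); j++ {
                if m[i][j] == 0 {
                    found = true
                    break Outer
                }
            }
        }
        return found
    }
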
@@ -452,7 +456,7 @@ func inlcopy(n *Node) *Node {
}
m := n.copy()
- if m.Func != nil {
+ if n.Op != OCALLPART && m.Func != nil {
Fatalf("unexpected Func: %v", m)
}
m.Left = inlcopy(n.Left)
@@ -666,60 +670,18 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
switch n.Op {
case OCALLFUNC:
- if Debug['m'] > 3 {
+ if Debug.m > 3 {
fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
}
- if n.Left.Func != nil && n.Left.Func.Inl != nil && !isIntrinsicCall(n) { // normal case
- n = mkinlcall(n, n.Left, maxCost, inlMap)
- } else if n.Left.isMethodExpression() && asNode(n.Left.Sym.Def) != nil {
- n = mkinlcall(n, asNode(n.Left.Sym.Def), maxCost, inlMap)
- } else if n.Left.Op == OCLOSURE {
- if f := inlinableClosure(n.Left); f != nil {
- n = mkinlcall(n, f, maxCost, inlMap)
- }
- } else if n.Left.Op == ONAME && n.Left.Name != nil && n.Left.Name.Defn != nil {
- if d := n.Left.Name.Defn; d.Op == OAS && d.Right.Op == OCLOSURE {
- if f := inlinableClosure(d.Right); f != nil {
- // NB: this check is necessary to prevent indirect re-assignment of the variable
- // having the address taken after the invocation or only used for reads is actually fine
- // but we have no easy way to distinguish the safe cases
- if d.Left.Name.Addrtaken() {
- if Debug['m'] > 1 {
- fmt.Printf("%v: cannot inline escaping closure variable %v\n", n.Line(), n.Left)
- }
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("%v cannot be inlined (escaping closure variable)", n.Left))
- }
- break
- }
-
- // ensure the variable is never re-assigned
- if unsafe, a := reassigned(n.Left); unsafe {
- if Debug['m'] > 1 {
- if a != nil {
- fmt.Printf("%v: cannot inline re-assigned closure variable at %v: %v\n", n.Line(), a.Line(), a)
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("%v cannot be inlined (re-assigned closure variable)", a))
- }
- } else {
- fmt.Printf("%v: cannot inline global closure variable %v\n", n.Line(), n.Left)
- if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("%v cannot be inlined (global closure variable)", n.Left))
- }
- }
- }
- break
- }
- n = mkinlcall(n, f, maxCost, inlMap)
- }
- }
+ if isIntrinsicCall(n) {
+ break
+ }
+ if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
+ n = mkinlcall(n, fn, maxCost, inlMap)
}
case OCALLMETH:
- if Debug['m'] > 3 {
+ if Debug.m > 3 {
fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
}
@@ -739,16 +701,73 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
return n
}
-// inlinableClosure takes an OCLOSURE node and follows linkage to the matching ONAME with
-// the inlinable body. Returns nil if the function is not inlinable.
-func inlinableClosure(n *Node) *Node {
- c := n.Func.Closure
- caninl(c)
- f := c.Func.Nname
- if f == nil || f.Func.Inl == nil {
+// inlCallee takes a function-typed expression and returns the underlying function ONAME
+// that it refers to if statically known. Otherwise, it returns nil.
+func inlCallee(fn *Node) *Node {
+ fn = staticValue(fn)
+ switch {
+ case fn.Op == ONAME && fn.Class() == PFUNC:
+ if fn.isMethodExpression() {
+ return asNode(fn.Sym.Def)
+ }
+ return fn
+ case fn.Op == OCLOSURE:
+ c := fn.Func.Closure
+ caninl(c)
+ return c.Func.Nname
+ }
+ return nil
+}
+
+func staticValue(n *Node) *Node {
+ for {
+ n1 := staticValue1(n)
+ if n1 == nil {
+ return n
+ }
+ n = n1
+ }
+}
+
+// staticValue1 implements a simple SSA-like optimization. If n is a local variable
+// that is initialized and never reassigned, staticValue1 returns the initializer
+// expression. Otherwise, it returns nil.
+func staticValue1(n *Node) *Node {
+ if n.Op != ONAME || n.Class() != PAUTO || n.Name.Addrtaken() {
+ return nil
+ }
+
+ defn := n.Name.Defn
+ if defn == nil {
+ return nil
+ }
+
+ var rhs *Node
+FindRHS:
+ switch defn.Op {
+ case OAS:
+ rhs = defn.Right
+ case OAS2:
+ for i, lhs := range defn.List.Slice() {
+ if lhs == n {
+ rhs = defn.Rlist.Index(i)
+ break FindRHS
+ }
+ }
+ Fatalf("%v missing from LHS of %v", n, defn)
+ default:
+ return nil
+ }
+ if rhs == nil {
+ Fatalf("RHS is nil: %v", defn)
+ }
+
+ unsafe, _ := reassigned(n)
+ if unsafe {
return nil
}
- return f
+
+ return rhs
}
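
staticValue gives both the inliner (via inlCallee) and escape analysis a common way to see through locals that are initialized once and never reassigned or address-taken, replacing the ad-hoc closure-variable cases deleted above. For example:

    func apply() int {
        add := func(x, y int) int { return x + y }
        f := add       // f is a PAUTO, assigned exactly once, address never taken
        return f(1, 2) // staticValue(f) chases f -> add -> the closure literal,
        // so inlCallee can resolve the callee and the call becomes inlinable
    }
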
// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
@@ -831,16 +850,19 @@ func (v *reassignVisitor) visitList(l Nodes) *Node {
return nil
}
-func tinlvar(t *types.Field, inlvars map[*Node]*Node) *Node {
- if n := asNode(t.Nname); n != nil && !n.isBlank() {
- inlvar := inlvars[n]
- if inlvar == nil {
- Fatalf("missing inlvar for %v\n", n)
- }
- return inlvar
+func inlParam(t *types.Field, as *Node, inlvars map[*Node]*Node) *Node {
+ n := asNode(t.Nname)
+ if n == nil || n.isBlank() {
+ return nblank
}
- return typecheck(nblank, ctxExpr|ctxAssign)
+ inlvar := inlvars[n]
+ if inlvar == nil {
+ Fatalf("missing inlvar for %v", n)
+ }
+ as.Ninit.Append(nod(ODCL, inlvar, nil))
+ inlvar.Name.Defn = as
+ return inlvar
}
var inlgen int
@@ -889,7 +911,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
if inlMap[fn] {
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname())
}
return n
@@ -903,12 +925,12 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
// We have a function node, and it has an inlineable body.
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body))
- } else if Debug['m'] != 0 {
+ } else if Debug.m != 0 {
fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
}
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
}
@@ -970,14 +992,15 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
continue
}
if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap
- continue
- }
- inlvars[ln] = typecheck(inlvar(ln), ctxExpr)
- if ln.Class() == PPARAM || ln.Name.Param.Stackcopy != nil && ln.Name.Param.Stackcopy.Class() == PPARAM {
- ninit.Append(nod(ODCL, inlvars[ln], nil))
+ // TODO(mdempsky): Remove once I'm confident
+ // this never actually happens. We currently
+ // perform inlining before escape analysis, so
+ // nothing should have moved to the heap yet.
+ Fatalf("impossible: %v", ln)
}
+ inlf := typecheck(inlvar(ln), ctxExpr)
+ inlvars[ln] = inlf
if genDwarfInline > 0 {
- inlf := inlvars[ln]
if ln.Class() == PPARAM {
inlf.Name.SetInlFormal(true)
} else {
@@ -1019,56 +1042,42 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// Assign arguments to the parameters' temp names.
as := nod(OAS2, nil, nil)
- as.Rlist.Set(n.List.Slice())
+ as.SetColas(true)
+ if n.Op == OCALLMETH {
+ if n.Left.Left == nil {
+ Fatalf("method call without receiver: %+v", n)
+ }
+ as.Rlist.Append(n.Left.Left)
+ }
+ as.Rlist.Append(n.List.Slice()...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
var vas *Node
- if fn.IsMethod() {
- rcv := fn.Type.Recv()
-
- if n.Left.Op == ODOTMETH {
- // For x.M(...), assign x directly to the
- // receiver parameter.
- if n.Left.Left == nil {
- Fatalf("method call without receiver: %+v", n)
- }
- ras := nod(OAS, tinlvar(rcv, inlvars), n.Left.Left)
- ras = typecheck(ras, ctxStmt)
- ninit.Append(ras)
- } else {
- // For T.M(...), add the receiver parameter to
- // as.List, so it's assigned by the normal
- // arguments.
- if as.Rlist.Len() == 0 {
- Fatalf("non-method call to method without first arg: %+v", n)
- }
- as.List.Append(tinlvar(rcv, inlvars))
- }
+ if recv := fn.Type.Recv(); recv != nil {
+ as.List.Append(inlParam(recv, as, inlvars))
}
-
for _, param := range fn.Type.Params().Fields().Slice() {
// For ordinary parameters or variadic parameters in
// dotted calls, just add the variable to the
// assignment list, and we're done.
if !param.IsDDD() || n.IsDDD() {
- as.List.Append(tinlvar(param, inlvars))
+ as.List.Append(inlParam(param, as, inlvars))
continue
}
// Otherwise, we need to collect the remaining values
// to pass as a slice.
- numvals := n.List.Len()
-
x := as.List.Len()
- for as.List.Len() < numvals {
+ for as.List.Len() < as.Rlist.Len() {
as.List.Append(argvar(param.Type, as.List.Len()))
}
varargs := as.List.Slice()[x:]
- vas = nod(OAS, tinlvar(param, inlvars), nil)
+ vas = nod(OAS, nil, nil)
+ vas.Left = inlParam(param, vas, inlvars)
if len(varargs) == 0 {
vas.Right = nodnil()
vas.Right.Type = param.Type
@@ -1165,7 +1174,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
}
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
}
@@ -1176,7 +1185,7 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// PAUTO's in the calling functions, and link them off of the
// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
func inlvar(var_ *Node) *Node {
- if Debug['m'] > 3 {
+ if Debug.m > 3 {
fmt.Printf("inlvar %+v\n", var_)
}
@@ -1255,13 +1264,13 @@ func (subst *inlsubst) node(n *Node) *Node {
switch n.Op {
case ONAME:
if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
}
return inlvar
}
- if Debug['m'] > 2 {
+ if Debug.m > 2 {
fmt.Printf("not substituting name %+v\n", n)
}
return n
diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/gc/inl_test.go
index 9d3b8c59fd..afa6b98315 100644
--- a/src/cmd/compile/internal/gc/inl_test.go
+++ b/src/cmd/compile/internal/gc/inl_test.go
@@ -83,7 +83,7 @@ func TestIntendedInlining(t *testing.T) {
"puintptr.ptr",
"spanOf",
"spanOfUnchecked",
- //"(*gcWork).putFast", // TODO(austin): For debugging #27993
+ "(*gcWork).putFast",
"(*gcWork).tryGetFast",
"(*guintptr).set",
"(*markBits).advance",
@@ -115,6 +115,7 @@ func TestIntendedInlining(t *testing.T) {
"byLiteral.Len",
"byLiteral.Less",
"byLiteral.Swap",
+ "(*dictDecoder).tryWriteCopy",
},
"encoding/base64": {
"assemble32",
diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go
index 1a344c6566..7cce371408 100644
--- a/src/cmd/compile/internal/gc/lex.go
+++ b/src/cmd/compile/internal/gc/lex.go
@@ -48,8 +48,11 @@ const (
Nowritebarrierrec // error on write barrier in this or recursive callees
Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
- // Runtime-only type pragmas
+ // Runtime and cgo type pragmas
NotInHeap // values of this type must not be heap allocated
+
+ // Go command pragmas
+ GoBuildPragma
)
const (
@@ -71,6 +74,8 @@ const (
func pragmaFlag(verb string) PragmaFlag {
switch verb {
+ case "go:build":
+ return GoBuildPragma
case "go:nointerface":
if objabi.Fieldtrack_enabled != 0 {
return Nointerface
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 7ad3bfe0c8..0b65e8a0b4 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -34,8 +34,6 @@ import (
"strings"
)
-var imported_unsafe bool
-
var (
buildid string
spectre string
@@ -132,7 +130,7 @@ func hidePanic() {
// supportsDynlink reports whether or not the code generator for the given
// architecture supports the -shared and -dynlink flags.
func supportsDynlink(arch *sys.Arch) bool {
- return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.S390X)
+ return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X)
}
// timing data for compiler phases
@@ -211,18 +209,27 @@ func Main(archInit func(*Arch)) {
flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
flag.BoolVar(&compiling_std, "std", false, "compiling standard library")
- objabi.Flagcount("%", "debug non-static initializers", &Debug['%'])
- objabi.Flagcount("B", "disable bounds checking", &Debug['B'])
- objabi.Flagcount("C", "disable printing of columns in error messages", &Debug['C']) // TODO(gri) remove eventually
flag.StringVar(&localimport, "D", "", "set relative `path` for local imports")
- objabi.Flagcount("E", "debug symbol export", &Debug['E'])
+
+ objabi.Flagcount("%", "debug non-static initializers", &Debug.P)
+ objabi.Flagcount("B", "disable bounds checking", &Debug.B)
+ objabi.Flagcount("C", "disable printing of columns in error messages", &Debug.C)
+ objabi.Flagcount("E", "debug symbol export", &Debug.E)
+ objabi.Flagcount("K", "debug missing line numbers", &Debug.K)
+ objabi.Flagcount("L", "show full file names in error messages", &Debug.L)
+ objabi.Flagcount("N", "disable optimizations", &Debug.N)
+ objabi.Flagcount("S", "print assembly listing", &Debug.S)
+ objabi.Flagcount("W", "debug parse tree after type checking", &Debug.W)
+ objabi.Flagcount("e", "no limit on number of errors reported", &Debug.e)
+ objabi.Flagcount("h", "halt on error", &Debug.h)
+ objabi.Flagcount("j", "debug runtime-initialized variables", &Debug.j)
+ objabi.Flagcount("l", "disable inlining", &Debug.l)
+ objabi.Flagcount("m", "print optimization decisions", &Debug.m)
+ objabi.Flagcount("r", "debug generated wrappers", &Debug.r)
+ objabi.Flagcount("w", "debug type checking", &Debug.w)
+
objabi.Flagfn1("I", "add `directory` to import search path", addidir)
- objabi.Flagcount("K", "debug missing line numbers", &Debug['K'])
- objabi.Flagcount("L", "show full file names in error messages", &Debug['L'])
- objabi.Flagcount("N", "disable optimizations", &Debug['N'])
- objabi.Flagcount("S", "print assembly listing", &Debug['S'])
objabi.AddVersionFlag() // -V
- objabi.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency")
@@ -231,17 +238,13 @@ func Main(archInit func(*Arch)) {
flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
- objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
- objabi.Flagcount("h", "halt on error", &Debug['h'])
+ objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg)
objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
- objabi.Flagcount("j", "debug runtime-initialized variables", &Debug['j'])
- objabi.Flagcount("l", "disable inlining", &Debug['l'])
flag.StringVar(&flag_lang, "lang", "", "release to compile for")
flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`")
objabi.Flagcount("live", "debug liveness analysis", &debuglive)
- objabi.Flagcount("m", "print optimization decisions", &Debug['m'])
if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
}
@@ -249,7 +252,6 @@ func Main(archInit func(*Arch)) {
flag.StringVar(&outfile, "o", "", "write output to `file`")
flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
flag.BoolVar(&writearchive, "pack", false, "write to file.a instead of file.o")
- objabi.Flagcount("r", "debug generated wrappers", &Debug['r'])
if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
flag.BoolVar(&flag_race, "race", false, "enable race detector")
}
@@ -259,7 +261,6 @@ func Main(archInit func(*Arch)) {
}
flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity")
- objabi.Flagcount("w", "debug type checking", &Debug['w'])
flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
var flag_shared bool
var flag_dynlink bool
@@ -325,9 +326,9 @@ func Main(archInit func(*Arch)) {
Ctxt.Flag_shared = flag_dynlink || flag_shared
Ctxt.Flag_dynlink = flag_dynlink
- Ctxt.Flag_optimize = Debug['N'] == 0
+ Ctxt.Flag_optimize = Debug.N == 0
- Ctxt.Debugasm = Debug['S']
+ Ctxt.Debugasm = Debug.S
Ctxt.Debugvlog = Debug_vlog
if flagDWARF {
Ctxt.DebugInfo = debuginfo
@@ -399,7 +400,7 @@ func Main(archInit func(*Arch)) {
instrumenting = true
}
- if compiling_runtime && Debug['N'] != 0 {
+ if compiling_runtime && Debug.N != 0 {
log.Fatal("cannot disable optimizations while compiling runtime")
}
if nBackendWorkers < 1 {
@@ -504,11 +505,11 @@ func Main(archInit func(*Arch)) {
}
// enable inlining. for now:
- // default: inlining on. (debug['l'] == 1)
- // -l: inlining off (debug['l'] == 0)
- // -l=2, -l=3: inlining on again, with extra debugging (debug['l'] > 1)
- if Debug['l'] <= 1 {
- Debug['l'] = 1 - Debug['l']
+ // default: inlining on. (Debug.l == 1)
+ // -l: inlining off (Debug.l == 0)
+ // -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1)
+ if Debug.l <= 1 {
+ Debug.l = 1 - Debug.l
}
if jsonLogOpt != "" { // parse version,destination from json logging optimization.
@@ -516,6 +517,7 @@ func Main(archInit func(*Arch)) {
}
ssaDump = os.Getenv("GOSSAFUNC")
+ ssaDir = os.Getenv("GOSSADIR")
if ssaDump != "" {
if strings.HasSuffix(ssaDump, "+") {
ssaDump = ssaDump[:len(ssaDump)-1]
@@ -594,7 +596,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top1")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias) {
+ if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) {
xtop[i] = typecheck(n, ctxStmt)
}
}
@@ -606,7 +608,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top2")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias {
+ if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() {
xtop[i] = typecheck(n, ctxStmt)
}
}
@@ -665,7 +667,7 @@ func Main(archInit func(*Arch)) {
// Phase 5: Inlining
timings.Start("fe", "inlining")
if Debug_typecheckinl != 0 {
- // Typecheck imported function bodies if debug['l'] > 1,
+ // Typecheck imported function bodies if Debug.l > 1,
// otherwise lazily when used or re-exported.
for _, n := range importlist {
if n.Func.Inl != nil {
@@ -679,7 +681,7 @@ func Main(archInit func(*Arch)) {
}
}
- if Debug['l'] != 0 {
+ if Debug.l != 0 {
// Find functions that can be inlined and clone them before walk expands them.
visitBottomUp(xtop, func(list []*Node, recursive bool) {
numfns := numNonClosures(list)
@@ -690,7 +692,7 @@ func Main(archInit func(*Arch)) {
// across more than one function.
caninl(n)
} else {
- if Debug['m'] > 1 {
+ if Debug.m > 1 {
fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname)
}
}
@@ -967,9 +969,10 @@ func readSymABIs(file, myimportpath string) {
if len(parts) != 3 {
log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
}
- sym, abi := parts[1], parts[2]
- if abi != "ABI0" { // Only supported external ABI right now
- log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abi)
+ sym, abistr := parts[1], parts[2]
+ abi, valid := obj.ParseABI(abistr)
+ if !valid {
+ log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
}
// If the symbol is already prefixed with
@@ -982,9 +985,9 @@ func readSymABIs(file, myimportpath string) {
// Record for later.
if parts[0] == "def" {
- symabiDefs[sym] = obj.ABI0
+ symabiDefs[sym] = abi
} else {
- symabiRefs[sym] = obj.ABI0
+ symabiRefs[sym] = abi
}
default:
log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
@@ -1173,7 +1176,6 @@ func importfile(f *Val) *types.Pkg {
}
if path_ == "unsafe" {
- imported_unsafe = true
return unsafepkg
}
@@ -1406,29 +1408,34 @@ func IsAlias(sym *types.Sym) bool {
return sym.Def != nil && asNode(sym.Def).Sym != sym
}
-// By default, assume any debug flags are incompatible with concurrent compilation.
-// A few are safe and potentially in common use for normal compiles, though; mark them as such here.
-var concurrentFlagOK = [256]bool{
- 'B': true, // disabled bounds checking
- 'C': true, // disable printing of columns in error messages
- 'e': true, // no limit on errors; errors all come from non-concurrent code
- 'I': true, // add `directory` to import search path
- 'N': true, // disable optimizations
- 'l': true, // disable inlining
- 'w': true, // all printing happens before compilation
- 'W': true, // all printing happens before compilation
- 'S': true, // printing disassembly happens at the end (but see concurrentBackendAllowed below)
+// By default, assume any debug flags are incompatible with concurrent
+// compilation. A few are safe and potentially in common use for
+// normal compiles, though; return true when only those are set.
+func concurrentFlagOk() bool {
+ // Zero out the debug flags known to be safe for concurrent
+ // compilation, then check that no other flag remains set by
+ // comparing the result against the zero struct.
+ d := Debug
+ d.B = 0 // disable bounds checking
+ d.C = 0 // disable printing of columns in error messages
+ d.e = 0 // no limit on errors; errors all come from non-concurrent code
+ d.N = 0 // disable optimizations
+ d.l = 0 // disable inlining
+ d.w = 0 // all printing happens before compilation
+ d.W = 0 // all printing happens before compilation
+ d.S = 0 // printing disassembly happens at the end (but see concurrentBackendAllowed below)
+
+ return d == DebugFlags{}
}
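
Replacing the [256]bool table works because DebugFlags has only comparable (int) fields: copy the struct, zero the flags known to be safe, and a single comparison against the zero value checks everything else, including any fields added later. The same pattern in isolation:

    package main

    import "fmt"

    type Flags struct{ A, B, C int }

    // onlySafeSet reports whether no flag other than the known-safe A is set.
    func onlySafeSet(f Flags) bool {
        f.A = 0 // f is a copy; zero the flag we allow
        return f == Flags{}
    }

    func main() {
        fmt.Println(onlySafeSet(Flags{A: 2}))       // true
        fmt.Println(onlySafeSet(Flags{A: 1, C: 3})) // false
    }
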
func concurrentBackendAllowed() bool {
- for i, x := range &Debug {
- if x != 0 && !concurrentFlagOK[i] {
- return false
- }
+ if !concurrentFlagOk() {
+ return false
}
- // Debug['S'] by itself is ok, because all printing occurs
+
+ // Debug.S by itself is ok, because all printing occurs
// while writing the object file, and that is non-concurrent.
- // Adding Debug_vlog, however, causes Debug['S'] to also print
+ // Adding Debug_vlog, however, causes Debug.S to also print
// while flushing the plist, which happens concurrently.
if Debug_vlog || debugstr != "" || debuglive > 0 {
return false
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
index 5dce533e4b..67d24ef0bc 100644
--- a/src/cmd/compile/internal/gc/noder.go
+++ b/src/cmd/compile/internal/gc/noder.go
@@ -11,6 +11,7 @@ import (
"runtime"
"strconv"
"strings"
+ "unicode"
"unicode/utf8"
"cmd/compile/internal/syntax"
@@ -90,7 +91,11 @@ func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
} else {
// line directive base
p0 := b0.Pos()
- p1 := src.MakePos(p.makeSrcPosBase(p0.Base()), p0.Line(), p0.Col())
+ p0b := p0.Base()
+ if p0b == b0 {
+ panic("infinite recursion in makeSrcPosBase")
+ }
+ p1 := src.MakePos(p.makeSrcPosBase(p0b), p0.Line(), p0.Col())
b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col())
}
p.basemap[b0] = b1
@@ -130,11 +135,13 @@ type noder struct {
base *src.PosBase
}
- file *syntax.File
- linknames []linkname
- pragcgobuf [][]string
- err chan syntax.Error
- scope ScopeID
+ file *syntax.File
+ linknames []linkname
+ pragcgobuf [][]string
+ err chan syntax.Error
+ scope ScopeID
+ importedUnsafe bool
+ importedEmbed bool
// scopeVars is a stack tracking the number of variables declared in the
// current function at the moment each open scope was opened.
@@ -236,19 +243,21 @@ type linkname struct {
func (p *noder) node() {
types.Block = 1
- imported_unsafe = false
+ p.importedUnsafe = false
+ p.importedEmbed = false
p.setlineno(p.file.PkgName)
mkpackage(p.file.PkgName.Value)
if pragma, ok := p.file.Pragma.(*Pragma); ok {
+ pragma.Flag &^= GoBuildPragma
p.checkUnused(pragma)
}
xtop = append(xtop, p.decls(p.file.DeclList)...)
for _, n := range p.linknames {
- if !imported_unsafe {
+ if !p.importedUnsafe {
p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
@@ -323,7 +332,6 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
val := p.basicLit(imp.Path)
ipkg := importfile(&val)
-
if ipkg == nil {
if nerrors == 0 {
Fatalf("phase error in import")
@@ -331,6 +339,13 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
return
}
+ if ipkg == unsafepkg {
+ p.importedUnsafe = true
+ }
+ if ipkg.Path == "embed" {
+ p.importedEmbed = true
+ }
+
ipkg.Direct = true
var my *types.Sym
@@ -372,6 +387,20 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
}
if pragma, ok := decl.Pragma.(*Pragma); ok {
+ if len(pragma.Embeds) > 0 {
+ if !p.importedEmbed {
+ // This check can't be done when building the list pragma.Embeds
+ // because that list is created before the noder starts walking over the file,
+ // so at that point it hasn't seen the imports.
+ // We're left to check now, just before applying the //go:embed lines.
+ for _, e := range pragma.Embeds {
+ p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
+ }
+ } else {
+ exprs = varEmbed(p, names, typ, exprs, pragma.Embeds)
+ }
+ pragma.Embeds = nil
+ }
p.checkUnused(pragma)
}
@@ -454,17 +483,17 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
param := n.Name.Param
param.Ntype = typ
- param.Alias = decl.Alias
+ param.SetAlias(decl.Alias)
if pragma, ok := decl.Pragma.(*Pragma); ok {
if !decl.Alias {
- param.Pragma = pragma.Flag & TypePragmas
+ param.SetPragma(pragma.Flag & TypePragmas)
pragma.Flag &^= TypePragmas
}
p.checkUnused(pragma)
}
nod := p.nod(decl, ODCLTYPE, n, nil)
- if param.Alias && !langSupported(1, 9, localpkg) {
+ if param.Alias() && !langSupported(1, 9, localpkg) {
yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9")
}
return nod
@@ -773,7 +802,7 @@ func (p *noder) sum(x syntax.Expr) *Node {
n := p.expr(x)
if Isconst(n, CTSTR) && n.Sym == nil {
nstr = n
- chunks = append(chunks, strlit(nstr))
+ chunks = append(chunks, nstr.StringVal())
}
for i := len(adds) - 1; i >= 0; i-- {
@@ -783,12 +812,12 @@ func (p *noder) sum(x syntax.Expr) *Node {
if Isconst(r, CTSTR) && r.Sym == nil {
if nstr != nil {
// Collapse r into nstr instead of adding to n.
- chunks = append(chunks, strlit(r))
+ chunks = append(chunks, r.StringVal())
continue
}
nstr = r
- chunks = append(chunks, strlit(nstr))
+ chunks = append(chunks, nstr.StringVal())
} else {
if len(chunks) > 1 {
nstr.SetVal(Val{U: strings.Join(chunks, "")})
@@ -1437,11 +1466,6 @@ func (p *noder) mkname(name *syntax.Name) *Node {
return mkname(p.name(name))
}
-func (p *noder) newname(name *syntax.Name) *Node {
- // TODO(mdempsky): Set line number?
- return newname(p.name(name))
-}
-
func (p *noder) wrapname(n syntax.Node, x *Node) *Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
@@ -1497,13 +1521,15 @@ var allowedStdPragmas = map[string]bool{
"go:cgo_import_dynamic": true,
"go:cgo_ldflag": true,
"go:cgo_dynamic_linker": true,
+ "go:embed": true,
"go:generate": true,
}
// *Pragma is the value stored in a syntax.Pragma during parsing.
type Pragma struct {
- Flag PragmaFlag // collected bits
- Pos []PragmaPos // position of each individual flag
+ Flag PragmaFlag // collected bits
+ Pos []PragmaPos // position of each individual flag
+ Embeds []PragmaEmbed
}
type PragmaPos struct {
@@ -1511,12 +1537,22 @@ type PragmaPos struct {
Pos syntax.Pos
}
+type PragmaEmbed struct {
+ Pos syntax.Pos
+ Patterns []string
+}
+
func (p *noder) checkUnused(pragma *Pragma) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
p.yyerrorpos(pos.Pos, "misplaced compiler directive")
}
}
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.yyerrorpos(e.Pos, "misplaced go:embed directive")
+ }
+ }
}
func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
@@ -1525,6 +1561,11 @@ func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
}
}
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
+ }
+ }
}
// pragma is called concurrently if files are parsed concurrently.
@@ -1569,6 +1610,17 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
}
p.linknames = append(p.linknames, linkname{pos, f[1], target})
+ case text == "go:embed", strings.HasPrefix(text, "go:embed "):
+ args, err := parseGoEmbed(text[len("go:embed"):])
+ if err != nil {
+ p.error(syntax.Error{Pos: pos, Msg: err.Error()})
+ }
+ if len(args) == 0 {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
+ break
+ }
+ pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args})
+
case strings.HasPrefix(text, "go:cgo_import_dynamic "):
// This is permitted for general use because Solaris
// code relies on it in golang.org/x/sys/unix and others.
@@ -1641,3 +1693,64 @@ func mkname(sym *types.Sym) *Node {
}
return n
}
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// go/build/read.go also processes these strings and contains similar logic.
+func parseGoEmbed(args string) ([]string, error) {
+ var list []string
+ for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
+ var path string
+ Switch:
+ switch args[0] {
+ default:
+ i := len(args)
+ for j, c := range args {
+ if unicode.IsSpace(c) {
+ i = j
+ break
+ }
+ }
+ path = args[:i]
+ args = args[i:]
+
+ case '`':
+ i := strings.Index(args[1:], "`")
+ if i < 0 {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ path = args[1 : 1+i]
+ args = args[1+i+1:]
+
+ case '"':
+ i := 1
+ for ; i < len(args); i++ {
+ if args[i] == '\\' {
+ i++
+ continue
+ }
+ if args[i] == '"' {
+ q, err := strconv.Unquote(args[:i+1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+ }
+ path = q
+ args = args[i+1:]
+ break Switch
+ }
+ }
+ if i >= len(args) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+
+ if args != "" {
+ r, _ := utf8.DecodeRuneInString(args)
+ if !unicode.IsSpace(r) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+ list = append(list, path)
+ }
+ return list, nil
+}
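
parseGoEmbed accepts three token forms: bare patterns running to the next space, back-quoted strings, and double-quoted Go strings (unquoted via strconv.Unquote). Quoting is only needed when a pattern contains spaces, as in this sketch with hypothetical file names:

    package p

    import "embed"

    // The two directives parse to the pattern lists
    // ["hello world.txt"] and ["assets/*", "templates/base.html"].

    //go:embed "hello world.txt"
    //go:embed assets/* `templates/base.html`
    var site embed.FS
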
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index b55331a948..226eb45252 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -14,6 +14,8 @@ import (
"encoding/json"
"fmt"
"io"
+ "io/ioutil"
+ "os"
"sort"
"strconv"
)
@@ -125,6 +127,7 @@ func dumpdata() {
itabsLen := len(itabs)
dumpimportstrings()
dumpbasictypes()
+ dumpembeds()
// Calls to dumpsignats can generate functions,
// like method wrappers and hash and equality routines.
@@ -272,7 +275,7 @@ func dumpGlobalConst(n *Node) {
default:
return
}
- Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64())
+ Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
}
func dumpglobls() {
@@ -305,20 +308,21 @@ func dumpglobls() {
// global symbols can't be declared during parallel compilation.
func addGCLocals() {
for _, s := range Ctxt.Text {
- if s.Func == nil {
+ fn := s.Func()
+ if fn == nil {
continue
}
- for _, gcsym := range []*obj.LSym{s.Func.GCArgs, s.Func.GCLocals, s.Func.GCRegs} {
+ for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals, fn.GCRegs} {
if gcsym != nil && !gcsym.OnList() {
ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
}
}
- if x := s.Func.StackObjects; x != nil {
+ if x := fn.StackObjects; x != nil {
attr := int16(obj.RODATA)
ggloblsym(x, int32(len(x.P)), attr)
x.Set(obj.AttrStatic, true)
}
- if x := s.Func.OpenCodedDeferInfo; x != nil {
+ if x := fn.OpenCodedDeferInfo; x != nil {
ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
}
}
@@ -357,28 +361,31 @@ func dbvec(s *obj.LSym, off int, bv bvec) int {
return off
}
+const (
+ stringSymPrefix = "go.string."
+ stringSymPattern = ".gostring.%d.%x"
+)
+
+// stringsym returns a symbol containing the string s.
+// The symbol contains the string data, not a string header.
func stringsym(pos src.XPos, s string) (data *obj.LSym) {
var symname string
if len(s) > 100 {
// Huge strings are hashed to avoid long names in object files.
// Indulge in some paranoia by writing the length of s, too,
// as protection against length extension attacks.
+ // fileStringSym below relies on the same naming pattern.
h := sha256.New()
io.WriteString(h, s)
- symname = fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
+ symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
} else {
// Small strings get named directly by their contents.
symname = strconv.Quote(s)
}
- const prefix = "go.string."
- symdataname := prefix + symname
-
- symdata := Ctxt.Lookup(symdataname)
-
+ symdata := Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
- // string data
- off := dsname(symdata, 0, s, pos, "string")
+ off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
@@ -386,26 +393,122 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) {
return symdata
}
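
The refactoring above gives names to the long-standing scheme: short strings become symbols named by their quoted contents, long ones by length plus SHA-256, so identical contents always coalesce to one symbol. A standalone sketch of the naming logic:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "strconv"
    )

    const (
        stringSymPrefix  = "go.string."
        stringSymPattern = ".gostring.%d.%x"
    )

    // symName mirrors the naming in stringsym: quoted contents for short
    // strings, length plus hash for long ones.
    func symName(s string) string {
        if len(s) > 100 {
            h := sha256.New()
            io.WriteString(h, s)
            return stringSymPrefix + fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
        }
        return stringSymPrefix + strconv.Quote(s)
    }

    func main() {
        fmt.Println(symName("hello")) // go.string."hello"
    }
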
-var slicebytes_gen int
+// fileStringSym returns a symbol for the contents and the size of file.
+// If readonly is true, the symbol shares storage with any literal string
+// or other file with the same content and is placed in a read-only section.
+// If readonly is false, the symbol is a read-write copy separate from any other,
+// for use as the backing store of a []byte.
+// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
+// The returned symbol contains the data itself, not a string header.
+func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer f.Close()
+ info, err := f.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ if !info.Mode().IsRegular() {
+ return nil, 0, fmt.Errorf("not a regular file")
+ }
+ size := info.Size()
+ if size <= 1*1024 {
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if int64(len(data)) != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ var sym *obj.LSym
+ if readonly {
+ sym = stringsym(pos, string(data))
+ } else {
+ sym = slicedata(pos, string(data)).Sym.Linksym()
+ }
+ if len(hash) > 0 {
+ sum := sha256.Sum256(data)
+ copy(hash, sum[:])
+ }
+ return sym, size, nil
+ }
+ if size > 2e9 {
+ // ggloblsym takes an int32,
+ // and probably the rest of the toolchain
+ // can't handle such big symbols either.
+ // See golang.org/issue/9862.
+ return nil, 0, fmt.Errorf("file too large")
+ }
-func slicebytes(nam *Node, s string) {
- slicebytes_gen++
- symname := fmt.Sprintf(".gobytes.%d", slicebytes_gen)
+ // File is too big to read and keep in memory.
+ // Compute hash if needed for read-only content hashing or if the caller wants it.
+ var sum []byte
+ if readonly || len(hash) > 0 {
+ h := sha256.New()
+ n, err := io.Copy(h, f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ sum = h.Sum(nil)
+ copy(hash, sum)
+ }
+
+ var symdata *obj.LSym
+ if readonly {
+ symname := fmt.Sprintf(stringSymPattern, size, sum)
+ symdata = Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ // Note: AttrContentAddressable cannot be set here,
+ // because the content-addressable-handling code
+ // does not know about file symbols.
+ }
+ } else {
+ // Emit a zero-length data symbol
+ // and then fix up length and content to use file.
+ symdata = slicedata(pos, "").Sym.Linksym()
+ symdata.Size = size
+ symdata.Type = objabi.SNOPTRDATA
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ }
+
+ return symdata, size, nil
+}
+
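Aside: for files too large to read into memory, the path above hashes in a streaming fashion and cross-checks the byte count against the earlier Stat to catch concurrent modification. A self-contained sketch of that pattern (the file name is only an example):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// hashFile hashes a file without loading it into memory, and detects
// modification between reads by comparing the number of bytes hashed
// against the size reported by Stat.
func hashFile(name string) ([]byte, int64, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, 0, err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return nil, 0, err
	}
	h := sha256.New()
	n, err := io.Copy(h, f)
	if err != nil {
		return nil, 0, err
	}
	if n != info.Size() {
		return nil, 0, fmt.Errorf("file changed between reads")
	}
	return h.Sum(nil), n, nil
}

func main() {
	sum, size, err := hashFile("go.mod") // example path
	fmt.Printf("%d bytes, sum %x, err %v\n", size, sum, err)
}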
+var slicedataGen int
+
+func slicedata(pos src.XPos, s string) *Node {
+ slicedataGen++
+ symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := localpkg.Lookup(symname)
symnode := newname(sym)
sym.Def = asTypesNode(symnode)
lsym := sym.Linksym()
- off := dsname(lsym, 0, s, nam.Pos, "slice")
+ off := dstringdata(lsym, 0, s, pos, "slice")
ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+ return symnode
+}
+
+func slicebytes(nam *Node, s string) {
if nam.Op != ONAME {
Fatalf("slicebytes %v", nam)
}
- slicesym(nam, symnode, int64(len(s)))
+ slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
}
-func dsname(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
+func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// Objects that are too large will cause the data section to overflow right away,
// causing a cryptic error message from the linker. Check for oversize objects here
// and provide a useful error message instead.
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 75da154fe2..863de5b6c7 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -50,7 +50,7 @@ type Order struct {
// Order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
func order(fn *Node) {
- if Debug['W'] > 1 {
+ if Debug.W > 1 {
s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
dumplist(s, fn.Nbody)
}
@@ -323,12 +323,7 @@ func (o *Order) stmtList(l Nodes) {
// and rewrites it to:
// m = OMAKESLICECOPY([]T, x, s); nil
func orderMakeSliceCopy(s []*Node) {
- const go115makeslicecopy = true
- if !go115makeslicecopy {
- return
- }
-
- if Debug['N'] != 0 || instrumenting {
+ if Debug.N != 0 || instrumenting {
return
}
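Aside: the source-level pattern this pass fuses. When a make of a slice is immediately followed by a copy from another slice into it, the compiler can allocate and copy in one operation, skipping the redundant zeroing of the fresh slice:

package main

import "fmt"

// clone matches the shape orderMakeSliceCopy looks for: make of a
// slice immediately followed by a copy into it from another slice.
func clone(src []byte) []byte {
	dst := make([]byte, len(src))
	copy(dst, src)
	return dst
}

func main() {
	fmt.Println(string(clone([]byte("abc"))))
}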
@@ -1102,7 +1097,7 @@ func (o *Order) expr(n, lhs *Node) *Node {
haslit := false
for _, n1 := range n.List.Slice() {
hasbyte = hasbyte || n1.Op == OBYTES2STR
- haslit = haslit || n1.Op == OLITERAL && len(strlit(n1)) != 0
+ haslit = haslit || n1.Op == OLITERAL && len(n1.StringVal()) != 0
}
if haslit && hasbyte {
@@ -1274,7 +1269,7 @@ func (o *Order) expr(n, lhs *Node) *Node {
var t *types.Type
switch n.Op {
case OSLICELIT:
- t = types.NewArray(n.Type.Elem(), n.Right.Int64())
+ t = types.NewArray(n.Type.Elem(), n.Right.Int64Val())
case OCALLPART:
t = partialCallType(n)
}
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 52b1ed351d..353f4b08c9 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -266,8 +266,8 @@ func compile(fn *Node) {
dtypesym(n.Type)
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
- if fn.Func.lsym.Func.StackObjects == nil {
- fn.Func.lsym.Func.StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
+ if fn.Func.lsym.Func().StackObjects == nil {
+ fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
}
}
}
@@ -415,7 +415,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
case PAUTO:
if !n.Name.Used() {
// Text == nil -> generating abstract function
- if fnsym.Func.Text != nil {
+ if fnsym.Func().Text != nil {
Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
}
continue
@@ -425,7 +425,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
continue
}
apdecls = append(apdecls, n)
- fnsym.Func.RecordAutoType(ngotype(n).Linksym())
+ fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
@@ -435,7 +435,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
// the function symbol to ensure that the type is included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
- for t, _ := range fnsym.Func.Autot {
+ for t, _ := range fnsym.Func().Autot {
typesyms = append(typesyms, t)
}
sort.Sort(obj.BySymName(typesyms))
@@ -444,7 +444,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
r.Sym = sym
r.Type = objabi.R_USETYPE
}
- fnsym.Func.Autot = nil
+ fnsym.Func().Autot = nil
var varScopes []ScopeID
for _, decl := range decls {
@@ -522,7 +522,7 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
}
typename := dwarf.InfoPrefix + typesymname(n.Type)
- delete(fnsym.Func.Autot, ngotype(n).Linksym())
+ delete(fnsym.Func().Autot, ngotype(n).Linksym())
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
@@ -667,7 +667,7 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
ChildIndex: -1,
})
// Record go type of n to ensure that it gets emitted by the linker.
- fnsym.Func.RecordAutoType(ngotype(n).Linksym())
+ fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
return decls, vars
@@ -731,7 +731,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
}
gotype := ngotype(n).Linksym()
- delete(fnsym.Func.Autot, gotype)
+ delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if genDwarfInline > 1 {
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index a9ea37701e..b471accb65 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -1552,26 +1552,27 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
// Emit the live pointer map data structures
ls := e.curfn.Func.lsym
- ls.Func.GCArgs, ls.Func.GCLocals, ls.Func.GCRegs = lv.emit()
+ fninfo := ls.Func()
+ fninfo.GCArgs, fninfo.GCLocals, fninfo.GCRegs = lv.emit()
p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ls.Func.GCArgs
+ p.To.Sym = fninfo.GCArgs
p = pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ls.Func.GCLocals
+ p.To.Sym = fninfo.GCLocals
if !go115ReduceLiveness {
p = pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_RegPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ls.Func.GCRegs
+ p.To.Sym = fninfo.GCRegs
}
return lv.livenessMap
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index 5434b0167a..1b4d765d42 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -112,12 +112,13 @@ func typecheckrangeExpr(n *Node) {
v2 = nil
}
- var why string
if v1 != nil {
if v1.Name != nil && v1.Name.Defn == n {
v1.Type = t1
- } else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
- yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
+ } else if v1.Type != nil {
+ if op, why := assignop(t1, v1.Type); op == OXXX {
+ yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
+ }
}
checkassign(n, v1)
}
@@ -125,8 +126,10 @@ func typecheckrangeExpr(n *Node) {
if v2 != nil {
if v2.Name != nil && v2.Name.Defn == n {
v2.Type = t2
- } else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
- yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
+ } else if v2.Type != nil {
+ if op, why := assignop(t2, v2.Type); op == OXXX {
+ yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
+ }
}
checkassign(n, v2)
}
@@ -463,7 +466,7 @@ func walkrange(n *Node) *Node {
//
// where == for keys of map m is reflexive.
func isMapClear(n *Node) bool {
- if Debug['N'] != 0 || instrumenting {
+ if Debug.N != 0 || instrumenting {
return false
}
@@ -530,7 +533,7 @@ func mapClear(m *Node) *Node {
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func arrayClear(n, v1, v2, a *Node) bool {
- if Debug['N'] != 0 || instrumenting {
+ if Debug.N != 0 || instrumenting {
return false
}
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index 21429af782..229fcfeaee 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -1275,8 +1275,9 @@ func dtypesym(t *types.Type) *obj.LSym {
}
ot = dgopkgpath(lsym, ot, tpkg)
+ xcount := sort.Search(n, func(i int) bool { return !types.IsExported(m[i].name.Name) })
ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
- ot = duintptr(lsym, ot, uint64(n))
+ ot = duintptr(lsym, ot, uint64(xcount))
ot = duintptr(lsym, ot, uint64(n))
dataAdd := imethodSize() * n
ot = dextratype(lsym, ot, t, dataAdd)
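Aside: xcount counts the exported methods by locating the exported/unexported boundary, which assumes the method slice is sorted with exported names first. A toy version of that search (isExported here only approximates types.IsExported):

package main

import (
	"fmt"
	"sort"
	"unicode"
	"unicode/utf8"
)

// isExported reports whether a name's first rune is upper case.
func isExported(name string) bool {
	r, _ := utf8.DecodeRuneInString(name)
	return unicode.IsUpper(r)
}

func main() {
	// With exported methods sorted first, the exported-method count is
	// the first index at which the predicate flips to true.
	methods := []string{"Close", "Read", "Write", "init", "reset"}
	xcount := sort.Search(len(methods), func(i int) bool {
		return !isExported(methods[i])
	})
	fmt.Println(xcount) // 3
}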
diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go
index d7239d5693..e66b859e10 100644
--- a/src/cmd/compile/internal/gc/scope.go
+++ b/src/cmd/compile/internal/gc/scope.go
@@ -62,9 +62,9 @@ func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
if len(marks) == 0 {
return
}
- p0 := fnsym.Func.Text
+ p0 := fnsym.Func().Text
scope := findScope(marks, p0.Pos)
- for p := fnsym.Func.Text; p != nil; p = p.Link {
+ for p := p0; p != nil; p = p.Link {
if p.Pos == p0.Pos {
continue
}
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index af19a96bbc..212fcc022d 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -39,7 +39,7 @@ func (s *InitSchedule) append(n *Node) {
// staticInit adds an initialization statement n to the schedule.
func (s *InitSchedule) staticInit(n *Node) {
if !s.tryStaticInit(n) {
- if Debug['%'] != 0 {
+ if Debug.P != 0 {
Dump("nonstatic", n)
}
s.append(n)
@@ -128,7 +128,7 @@ func (s *InitSchedule) staticcopy(l *Node, r *Node) bool {
case OSLICELIT:
// copy slice
a := s.inittemps[r]
- slicesym(l, a, r.Right.Int64())
+ slicesym(l, a, r.Right.Int64Val())
return true
case OARRAYLIT, OSTRUCTLIT:
@@ -205,7 +205,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
case OSTR2BYTES:
if l.Class() == PEXTERN && r.Left.Op == OLITERAL {
- sval := strlit(r.Left)
+ sval := r.Left.StringVal()
slicebytes(l, sval)
return true
}
@@ -213,7 +213,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
case OSLICELIT:
s.initplan(r)
// Init slice.
- bound := r.Right.Int64()
+ bound := r.Right.Int64Val()
ta := types.NewArray(r.Type.Elem(), bound)
ta.SetNoalg(true)
a := staticname(ta)
@@ -375,11 +375,6 @@ func readonlystaticname(t *types.Type) *Node {
return n
}
-func isLiteral(n *Node) bool {
- // Treat nils as zeros rather than literals.
- return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
-}
-
func (n *Node) isSimpleName() bool {
return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
}
@@ -404,7 +399,7 @@ const (
func getdyn(n *Node, top bool) initGenType {
switch n.Op {
default:
- if isLiteral(n) {
+ if n.isGoConst() {
return initConst
}
return initDynamic
@@ -413,7 +408,7 @@ func getdyn(n *Node, top bool) initGenType {
if !top {
return initDynamic
}
- if n.Right.Int64()/4 > int64(n.List.Len()) {
+ if n.Right.Int64Val()/4 > int64(n.List.Len()) {
// <25% of entries have explicit values.
// Very rough estimate: it takes 4 bytes of instructions
// to initialize 1 byte of result. So don't use a static
@@ -559,7 +554,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
continue
}
- islit := isLiteral(value)
+ islit := value.isGoConst()
if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
continue
}
@@ -589,12 +584,12 @@ func isSmallSliceLit(n *Node) bool {
r := n.Right
- return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64() <= smallArrayBytes/n.Type.Elem().Width)
+ return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width)
}
func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
// make an array type corresponding the number of elements we have
- t := types.NewArray(n.Type.Elem(), n.Right.Int64())
+ t := types.NewArray(n.Type.Elem(), n.Right.Int64Val())
dowidth(t)
if ctxt == inNonInitFunction {
@@ -732,7 +727,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
continue
}
- if vstat != nil && isLiteral(value) { // already set by copy from static value
+ if vstat != nil && value.isGoConst() { // already set by copy from static value
continue
}
@@ -993,7 +988,7 @@ func oaslit(n *Node, init *Nodes) bool {
func getlit(lit *Node) int {
if smallintconst(lit) {
- return int(lit.Int64())
+ return int(lit.Int64Val())
}
return -1
}
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 815ff7f99f..fb9d3e811a 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -9,8 +9,8 @@ import (
"fmt"
"html"
"os"
+ "path/filepath"
"sort"
- "strings"
"bufio"
"bytes"
@@ -27,6 +27,7 @@ var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache
var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
+var ssaDir string // optional destination for ssa dump file
var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string // generate CFGs for these phases
const ssaDumpFile = "ssa.html"
@@ -49,21 +50,16 @@ func initssaconfig() {
// Caching is disabled in the backend, so generating these here avoids allocations.
_ = types.NewPtr(types.Types[TINTER]) // *interface{}
_ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
- _ = types.NewPtr(types.NewPtr(types.Idealstring)) // **string
_ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
_ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
_ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
_ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
- _ = types.NewPtr(types.NewSlice(types.Idealstring)) // *[]string
_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
_ = types.NewPtr(types.Types[TINT16]) // *int16
_ = types.NewPtr(types.Types[TINT64]) // *int64
_ = types.NewPtr(types.Errortype) // *error
types.NewPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug['N'] == 0)
- if thearch.LinkArch.Name == "386" {
- ssaConfig.Set387(thearch.Use387)
- }
+ ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
ssaConfig.Race = flag_race
ssaCaches = make([]ssa.Cache, nBackendWorkers)
@@ -174,10 +170,6 @@ func initssaconfig() {
ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU")
}
- // GO386=387 runtime definitions
- ControlWord64trunc = sysvar("controlWord64trunc") // uint16
- ControlWord32 = sysvar("controlWord32") // uint16
-
// Wasm (all asm funcs with special ABIs)
WasmMove = sysvar("wasmMove")
WasmZero = sysvar("wasmZero")
@@ -248,7 +240,7 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer")
- s.curfn.Func.lsym.Func.OpenCodedDeferInfo = x
+ s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
@@ -347,7 +339,13 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.f.Entry.Pos = fn.Pos
if printssa {
- s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDumpFile, s.f, ssaDumpCFG)
+ ssaDF := ssaDumpFile
+ if ssaDir != "" {
+ ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html")
+ ssaD := filepath.Dir(ssaDF)
+ os.MkdirAll(ssaD, 0755)
+ }
+ s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
// TODO: generate and print a mapping from nodes to values and blocks
dumpSourcesColumn(s.f.HTMLWriter, fn)
s.f.HTMLWriter.WriteAST("AST", astBuf)
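Aside: with a dump directory configured, each function gets its own HTML file named by import path and function, instead of every dump overwriting a single ssa.html. Roughly (the directory and names below are placeholders):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	ssaDir := "ssadumps"                       // stand-in for the configured dump dir
	importPath, fn := "example.com/pkg", "Max" // stand-ins for myimportpath and name
	df := filepath.Join(ssaDir, importPath+"."+fn+".html")
	if err := os.MkdirAll(filepath.Dir(df), 0755); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("SSA dump would be written to", df)
}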
@@ -359,7 +357,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.fwdVars = map[*Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
- s.hasOpenDefers = Debug['N'] == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
+ s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
switch {
case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
@@ -743,7 +741,7 @@ func (s *state) pushLine(line src.XPos) {
// the frontend may emit node with line number missing,
// use the parent line number in this case.
line = s.peekPos()
- if Debug['K'] != 0 {
+ if Debug.K != 0 {
Warn("buildssa: unknown position (line 0)")
}
} else {
@@ -1216,7 +1214,7 @@ func (s *state) stmt(n *Node) {
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
- if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 {
+ if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
@@ -1273,7 +1271,7 @@ func (s *state) stmt(n *Node) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
i, j, k := rhs.SliceBounds()
- if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
+ if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) {
// [0:...] is the same as [:...]
i = nil
}
@@ -1303,7 +1301,7 @@ func (s *state) stmt(n *Node) {
case OIF:
if Isconst(n.Left, CTBOOL) {
s.stmtList(n.Left.Ninit)
- if n.Left.Bool() {
+ if n.Left.BoolVal() {
s.stmtList(n.Nbody)
} else {
s.stmtList(n.Rlist)
@@ -2474,6 +2472,11 @@ func (s *state) expr(n *Node) *ssa.Value {
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ case OANDNOT:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
+ return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
case OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
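Aside: the new OANDNOT case lowers Go's AND-NOT operator without a dedicated SSA op, emitting a & ^b in place of a &^ b. The identity it relies on:

package main

import "fmt"

func main() {
	a, b := uint8(0b1011), uint8(0b0110)
	fmt.Printf("%04b\n", a&^b)   // 1001, the AND-NOT operator
	fmt.Printf("%04b\n", a & ^b) // 1001, the lowered form: AND of a with NOT b
}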
@@ -2557,22 +2560,22 @@ func (s *state) expr(n *Node) *ssa.Value {
return s.addr(n.Left)
case ORESULT:
- if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall {
+ if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
// Do the old thing
addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.load(n.Type, addr)
+ return s.rawLoad(n.Type, addr)
}
which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
if which == -1 {
// Do the old thing // TODO: Panic instead.
addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.load(n.Type, addr)
+ return s.rawLoad(n.Type, addr)
}
if canSSAType(n.Type) {
return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
} else {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
- return s.load(n.Type, addr)
+ return s.rawLoad(n.Type, addr)
}
case ODEREF:
@@ -2612,7 +2615,7 @@ func (s *state) expr(n *Node) *ssa.Value {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
- return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(strlit(n.Left)[n.Right.Int64()])))
+ return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()])))
}
a := s.expr(n.Left)
i := s.expr(n.Right)
@@ -2621,7 +2624,7 @@ func (s *state) expr(n *Node) *ssa.Value {
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if Isconst(n.Right, CTINT) {
- ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
@@ -3391,6 +3394,13 @@ func init() {
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ },
+ sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
@@ -3429,6 +3439,12 @@ func init() {
return nil
},
sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -3530,12 +3546,24 @@ func init() {
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "And",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "Or",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
@@ -3544,9 +3572,19 @@ func init() {
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
+ alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
@@ -4251,6 +4289,7 @@ func (s *state) openDeferExit() {
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
zeroval := s.constInt8(types.Types[TUINT8], 0)
+ testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
@@ -4288,23 +4327,38 @@ func (s *state) openDeferExit() {
stksize := fn.Type.ArgWidth()
var ACArgs []ssa.Param
var ACResults []ssa.Param
+ var callArgs []*ssa.Value
if r.rcvr != nil {
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
- s.store(types.Types[TUINTPTR], addr, v)
+ if testLateExpansion {
+ callArgs = append(callArgs, v)
+ } else {
+ s.store(types.Types[TUINTPTR], addr, v)
+ }
}
for j, argAddrVal := range r.argVals {
f := getParam(r.n, j)
pt := types.NewPtr(f.Type)
- addr := s.constOffPtrSP(pt, argStart+f.Offset)
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart + f.Offset)})
- if !canSSAType(f.Type) {
- s.move(f.Type, addr, argAddrVal)
+ ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)})
+ if testLateExpansion {
+ var a *ssa.Value
+ if !canSSAType(f.Type) {
+ a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
+ } else {
+ a = s.load(f.Type, argAddrVal)
+ }
+ callArgs = append(callArgs, a)
} else {
- argVal := s.load(f.Type, argAddrVal)
- s.storeType(f.Type, addr, argVal, 0, false)
+ addr := s.constOffPtrSP(pt, argStart+f.Offset)
+ if !canSSAType(f.Type) {
+ s.move(f.Type, addr, argAddrVal)
+ } else {
+ argVal := s.load(f.Type, argAddrVal)
+ s.storeType(f.Type, addr, argVal, 0, false)
+ }
}
}
var call *ssa.Value
@@ -4312,13 +4366,31 @@ func (s *state) openDeferExit() {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[TUINTPTR], v)
- call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, v, s.mem())
+ aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
+ }
} else {
- // Do a static call if the original call was a static function or method
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults), s.mem())
+ aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ // Do a static call if the original call was a static function or method
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
}
call.AuxInt = stksize
- s.vars[&memVar] = call
+ if testLateExpansion {
+ s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ } else {
+ s.vars[&memVar] = call
+ }
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
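Aside: independent of the late-expansion plumbing, the code openDeferExit emits behaves like a hand-written epilogue that replays the active defers in reverse order, each guarded by a per-site bit. A source-level analogue of that behavior (a sketch, not the generated code):

package main

import "fmt"

// f sketches the emitted exit sequence: each defer site sets a bit when
// reached, and function exit runs the deferred calls in reverse order,
// each call guarded by its bit.
func f(a, b bool) {
	var deferBits uint8
	defer func() {
		if deferBits&2 != 0 {
			fmt.Println("second defer")
		}
		if deferBits&1 != 0 {
			fmt.Println("first defer")
		}
	}()
	if a {
		deferBits |= 1 // stands for: defer fmt.Println("first defer")
	}
	if b {
		deferBits |= 2 // stands for: defer fmt.Println("second defer")
	}
}

func main() {
	f(true, false) // prints "first defer"
}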
@@ -4375,11 +4447,9 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
switch n.Op {
case OCALLFUNC:
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
sym = fn.Sym
- if !returnResultAddr && strings.Contains(sym.Name, "testLateExpansion") {
- testLateExpansion = true
- }
break
}
closure = s.expr(fn)
@@ -4392,11 +4462,9 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
if fn.Op != ODOTMETH {
s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal {
sym = fn.Sym
- if !returnResultAddr && strings.Contains(sym.Name, "testLateExpansion") {
- testLateExpansion = true
- }
break
}
closure = s.getMethodClosure(fn)
@@ -4406,6 +4474,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
if fn.Op != ODOTINTER {
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
}
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
@@ -4424,6 +4493,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
var call *ssa.Value
if k == callDeferStack {
+ testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
// Make a defer struct d on the stack.
t := deferstruct(stksize)
d := tempAt(n.Pos, s.curfn, t)
@@ -4474,10 +4544,17 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
}
// Call runtime.deferprocStack with pointer to _defer record.
- arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
- s.store(types.Types[TUINTPTR], arg0, addr)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())})
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults), s.mem())
+ aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, addr, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
+ s.store(types.Types[TUINTPTR], arg0, addr)
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
if stksize < int64(Widthptr) {
// We need room for both the call to deferprocStack and the call to
// the deferred function.
@@ -4544,9 +4621,21 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// call target
switch {
case k == callDefer:
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferproc, ACArgs, ACResults), s.mem())
+ aux := ssa.StaticAuxCall(deferproc, ACArgs, ACResults)
+ if testLateExpansion {
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
case k == callGo:
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(newproc, ACArgs, ACResults), s.mem())
+ aux := ssa.StaticAuxCall(newproc, ACArgs, ACResults)
+ if testLateExpansion {
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
case closure != nil:
// rawLoad because loading the code pointer from a
// closure is always safe, but IsSanitizerSafeAddr
@@ -4554,18 +4643,25 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// critical that we not clobber any arguments already
// stored onto the stack.
codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
- call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem())
+ if testLateExpansion {
+ aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem())
+ }
case codeptr != nil:
- call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
+ if testLateExpansion {
+ aux := ssa.InterfaceAuxCall(ACArgs, ACResults)
+ call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
+ }
case sym != nil:
if testLateExpansion {
- var tys []*types.Type
aux := ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults)
- for i := int64(0); i < aux.NResults(); i++ {
- tys = append(tys, aux.TypeOfResult(i))
- }
- tys = append(tys, types.TypeMem)
- call = s.newValue0A(ssa.OpStaticLECall, types.NewResults(tys), aux)
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
} else {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
@@ -4606,7 +4702,11 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
}
fp := res.Field(0)
if returnResultAddr {
- return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
+ pt := types.NewPtr(fp.Type)
+ if testLateExpansion {
+ return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
+ }
+ return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize())
}
if testLateExpansion {
@@ -4649,7 +4749,7 @@ func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
s.nilCheck(itab)
itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
- rcvr := s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
+ rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
}
@@ -4710,7 +4810,7 @@ func (s *state) addr(n *Node) *ssa.Value {
}
case ORESULT:
// load return from callee
- if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall {
+ if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
return s.constOffPtrSP(t, n.Xoffset)
}
which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
@@ -4770,7 +4870,7 @@ func (s *state) addr(n *Node) *ssa.Value {
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n *Node) bool {
- if Debug['N'] != 0 {
+ if Debug.N != 0 {
return false
}
for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
@@ -4881,7 +4981,7 @@ func (s *state) nilCheck(ptr *ssa.Value) {
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
- if bounded || Debug['B'] != 0 {
+ if bounded || Debug.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check necessary,
// just return the extended index.
//
@@ -5013,15 +5113,22 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
s.prevCall = nil
// Write args to the stack
off := Ctxt.FixedFrameSize()
+ testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
var ACArgs []ssa.Param
var ACResults []ssa.Param
+ var callArgs []*ssa.Value
+
for _, arg := range args {
t := arg.Type
off = Rnd(off, t.Alignment())
- ptr := s.constOffPtrSP(t.PtrTo(), off)
size := t.Size()
ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
- s.store(t, ptr, arg)
+ if testLateExpansion {
+ callArgs = append(callArgs, arg)
+ } else {
+ ptr := s.constOffPtrSP(t.PtrTo(), off)
+ s.store(t, ptr, arg)
+ }
off += size
}
off = Rnd(off, int64(Widthreg))
@@ -5035,8 +5142,17 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
}
// Issue call
- call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn, ACArgs, ACResults), s.mem())
- s.vars[&memVar] = call
+ var call *ssa.Value
+ aux := ssa.StaticAuxCall(fn, ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ s.vars[&memVar] = call
+ }
if !returns {
// Finish block
@@ -5052,11 +5168,24 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
// Load results
res := make([]*ssa.Value, len(results))
- for i, t := range results {
- off = Rnd(off, t.Alignment())
- ptr := s.constOffPtrSP(types.NewPtr(t), off)
- res[i] = s.load(t, ptr)
- off += t.Size()
+ if testLateExpansion {
+ for i, t := range results {
+ off = Rnd(off, t.Alignment())
+ if canSSAType(t) {
+ res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
+ } else {
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
+ res[i] = s.rawLoad(t, addr)
+ }
+ off += t.Size()
+ }
+ } else {
+ for i, t := range results {
+ off = Rnd(off, t.Alignment())
+ ptr := s.constOffPtrSP(types.NewPtr(t), off)
+ res[i] = s.load(t, ptr)
+ off += t.Size()
+ }
}
off = Rnd(off, int64(Widthptr))
@@ -5093,7 +5222,10 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.store(t, left, right)
case t.IsPtrShaped():
- // no scalar fields.
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ s.store(t, left, right) // see issue 42032
+ }
+ // otherwise, no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
@@ -5137,6 +5269,9 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ break // see issue 42032
+ }
s.store(t, left, right)
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
@@ -5913,9 +6048,7 @@ type SSAGenState struct {
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
- // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
- SSEto387 map[int16]int16
- // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
+ // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
ScratchFpMem *Node
maxarg int64 // largest frame size for arguments to calls made by the function
@@ -6021,7 +6154,7 @@ func emitStackObjects(e *ssafn, pp *Progs) {
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
- x := e.curfn.Func.lsym.Func.StackObjects
+ x := e.curfn.Func.lsym.Func().StackObjects
off := 0
off = duintptr(x, off, uint64(len(vars)))
for _, v := range vars {
@@ -6058,7 +6191,7 @@ func genssa(f *ssa.Func, pp *Progs) {
s.livenessMap = liveness(e, f, pp)
emitStackObjects(e, pp)
- openDeferInfo := e.curfn.Func.lsym.Func.OpenCodedDeferInfo
+ openDeferInfo := e.curfn.Func.lsym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
@@ -6082,10 +6215,6 @@ func genssa(f *ssa.Func, pp *Progs) {
progToBlock[s.pp.next] = f.Blocks[0]
}
- if thearch.Use387 {
- s.SSEto387 = map[int16]int16{}
- }
-
s.ScratchFpMem = e.scratchFpMem
if Ctxt.Flag_locationlists {
@@ -6208,7 +6337,7 @@ func genssa(f *ssa.Func, pp *Progs) {
}
// Emit control flow instructions for block
var next *ssa.Block
- if i < len(f.Blocks)-1 && Debug['N'] == 0 {
+ if i < len(f.Blocks)-1 && Debug.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
@@ -6267,7 +6396,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.Func.lsym.Func.AddInlMark(p, inlMarks[m])
+ pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
@@ -6279,7 +6408,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.Func.lsym.Func.AddInlMark(p, inlMarks[p])
+ pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[p])
}
}
}
@@ -6516,7 +6645,7 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
} else {
lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
}
- if bounded || Debug['B'] != 0 {
+ if bounded || Debug.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
@@ -6790,56 +6919,38 @@ func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
ptrType := types.NewPtr(types.Types[TUINT8])
lenType := types.Types[TINT]
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this string up into two separate variables.
- p := e.splitSlot(&name, ".ptr", 0, ptrType)
- l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
- return p, l
- }
- // Return the two parts of the larger variable.
- return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
+ // Split this string up into two separate variables.
+ p := e.SplitSlot(&name, ".ptr", 0, ptrType)
+ l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
+ return p, l
}
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
u := types.Types[TUINTPTR]
t := types.NewPtr(types.Types[TUINT8])
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this interface up into two separate variables.
- f := ".itab"
- if n.Type.IsEmptyInterface() {
- f = ".type"
- }
- c := e.splitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
- d := e.splitSlot(&name, ".data", u.Size(), t)
- return c, d
+ // Split this interface up into two separate variables.
+ f := ".itab"
+ if n.Type.IsEmptyInterface() {
+ f = ".type"
}
- // Return the two parts of the larger variable.
- return ssa.LocalSlot{N: n, Type: u, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
+ c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
+ d := e.SplitSlot(&name, ".data", u.Size(), t)
+ return c, d
}
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
ptrType := types.NewPtr(name.Type.Elem())
lenType := types.Types[TINT]
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this slice up into three separate variables.
- p := e.splitSlot(&name, ".ptr", 0, ptrType)
- l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
- c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
- return p, l, c
- }
- // Return the three parts of the larger variable.
- return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
- ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
- ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
+ p := e.SplitSlot(&name, ".ptr", 0, ptrType)
+ l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
+ c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
+ return p, l, c
}
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
@@ -6847,53 +6958,30 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
} else {
t = types.Types[TFLOAT32]
}
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this complex up into two separate variables.
- r := e.splitSlot(&name, ".real", 0, t)
- i := e.splitSlot(&name, ".imag", t.Size(), t)
- return r, i
- }
- // Return the two parts of the larger variable.
- return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
+ r := e.SplitSlot(&name, ".real", 0, t)
+ i := e.SplitSlot(&name, ".imag", t.Size(), t)
+ return r, i
}
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
var t *types.Type
if name.Type.IsSigned() {
t = types.Types[TINT32]
} else {
t = types.Types[TUINT32]
}
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Split this int64 up into two separate variables.
- if thearch.LinkArch.ByteOrder == binary.BigEndian {
- return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
- }
- return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
- }
- // Return the two parts of the larger variable.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
+ return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
}
- return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
+ return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
}
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
- n := name.N.(*Node)
st := name.Type
- ft := st.FieldType(i)
- var offset int64
- for f := 0; f < i; f++ {
- offset += st.FieldType(f).Size()
- }
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- // Note: the _ field may appear several times. But
- // have no fear, identically-named but distinct Autos are
- // ok, albeit maybe confusing for a debugger.
- return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
- }
- return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
+ // Note: the _ field may appear several times. But
+ // have no fear, identically-named but distinct Autos are
+ // ok, albeit maybe confusing for a debugger.
+ return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i))
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
@@ -6903,19 +6991,23 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
e.Fatalf(n.Pos, "bad array size")
}
et := at.Elem()
- if n.Class() == PAUTO && !n.Name.Addrtaken() {
- return e.splitSlot(&name, "[0]", 0, et)
- }
- return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
+ return e.SplitSlot(&name, "[0]", 0, et)
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
return itabsym(it, offset)
}
-// splitSlot returns a slot representing the data of parent starting at offset.
-func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
- s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}
+// SplitSlot returns a slot representing the data of parent starting at offset.
+func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
+ node := parent.N.(*Node)
+
+ if node.Class() != PAUTO || node.Name.Addrtaken() {
+ // addressed things and non-autos retain their parents (i.e., cannot truly be split)
+ return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
+ }
+
+ s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
n := &Node{
Name: new(Name),
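Aside: the exported SplitSlot now centralizes the decision the Split* callers above used to repeat: only non-address-taken autos get genuinely split into named pieces; everything else remains an offset into its parent. A toy model of that decision:

package main

import "fmt"

type slot struct {
	name string
	off  int64
}

// splitSlot mirrors the check above: a non-address-taken auto is split
// into an independently named piece; anything else stays an offset
// into its parent.
func splitSlot(parent slot, suffix string, off int64, auto, addrTaken bool) slot {
	if !auto || addrTaken {
		return slot{parent.name, parent.off + off}
	}
	return slot{parent.name + suffix, 0}
}

func main() {
	s := slot{name: "str"}
	fmt.Println(splitSlot(s, ".len", 8, true, false)) // {str.len 0}
	fmt.Println(splitSlot(s, ".len", 8, true, true))  // {str 8}
}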
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 07547df36e..defefd76b3 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -96,7 +96,7 @@ func flusherrors() {
}
func hcrash() {
- if Debug['h'] != 0 {
+ if Debug.h != 0 {
flusherrors()
if outfile != "" {
os.Remove(outfile)
@@ -107,7 +107,7 @@ func hcrash() {
}
func linestr(pos src.XPos) string {
- return Ctxt.OutermostPos(pos).Format(Debug['C'] == 0, Debug['L'] == 1)
+ return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1)
}
// lasterror keeps track of the most recently issued error.
@@ -153,7 +153,7 @@ func yyerrorl(pos src.XPos, format string, args ...interface{}) {
hcrash()
nerrors++
- if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
+ if nsavederrors+nerrors >= 10 && Debug.e == 0 {
flusherrors()
fmt.Printf("%v: too many errors\n", linestr(pos))
errorexit()
@@ -175,7 +175,7 @@ func Warn(fmt_ string, args ...interface{}) {
func Warnl(line src.XPos, fmt_ string, args ...interface{}) {
adderr(line, fmt_, args...)
- if Debug['m'] != 0 {
+ if Debug.m != 0 {
flusherrors()
}
}
@@ -222,7 +222,7 @@ func hasUniquePos(n *Node) bool {
}
if !n.Pos.IsKnown() {
- if Debug['K'] != 0 {
+ if Debug.K != 0 {
Warn("setlineno: unknown position (line 0)")
}
return false
@@ -348,7 +348,7 @@ func newname(s *types.Sym) *Node {
return n
}
-// newname returns a new ONAME Node associated with symbol s at position pos.
+// newnamel returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting n.Name.Curfn.
func newnamel(pos src.XPos, s *types.Sym) *Node {
if s == nil {
@@ -546,22 +546,19 @@ func methtype(t *types.Type) *types.Type {
// Is type src assignment compatible to type dst?
// If so, return op code to use in conversion.
-// If not, return OXXX.
-func assignop(src, dst *types.Type, why *string) Op {
- if why != nil {
- *why = ""
- }
-
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+func assignop(src, dst *types.Type) (Op, string) {
if src == dst {
- return OCONVNOP
+ return OCONVNOP, ""
}
if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
- return OXXX
+ return OXXX, ""
}
// 1. src type is identical to dst.
if types.Identical(src, dst) {
- return OCONVNOP
+ return OCONVNOP, ""
}
// 2. src and dst have identical underlying types
@@ -575,13 +572,13 @@ func assignop(src, dst *types.Type, why *string) Op {
if src.IsEmptyInterface() {
// Conversion between two empty interfaces
// requires no code.
- return OCONVNOP
+ return OCONVNOP, ""
}
if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() {
// Conversion between two types, at least one unnamed,
// needs no code. The exception is nonempty interfaces
// which need to have their itab updated.
- return OCONVNOP
+ return OCONVNOP, ""
}
}
@@ -590,49 +587,47 @@ func assignop(src, dst *types.Type, why *string) Op {
var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
- return OCONVIFACE
+ return OCONVIFACE, ""
}
// we'll have complained about this method anyway, suppress spurious messages.
if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
- return OCONVIFACE
- }
-
- if why != nil {
- if isptrto(src, TINTER) {
- *why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
- } else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
- } else if have != nil && have.Sym == missing.Sym {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else if ptr != 0 {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
- } else if have != nil {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
- } else {
- *why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
- }
+ return OCONVIFACE, ""
+ }
+
+ var why string
+ if isptrto(src, TINTER) {
+ why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
+ } else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
+ } else if have != nil && have.Sym == missing.Sym {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
+ } else if have != nil {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
}
- return OXXX
+ return OXXX, why
}
if isptrto(dst, TINTER) {
- if why != nil {
- *why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
- }
- return OXXX
+ why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+ return OXXX, why
}
if src.IsInterface() && dst.Etype != TBLANK {
var missing, have *types.Field
var ptr int
- if why != nil && implements(dst, src, &missing, &have, &ptr) {
- *why = ": need type assertion"
+ var why string
+ if implements(dst, src, &missing, &have, &ptr) {
+ why = ": need type assertion"
}
- return OXXX
+ return OXXX, why
}
// 4. src is a bidirectional channel value, dst is a channel type,
@@ -640,7 +635,7 @@ func assignop(src, dst *types.Type, why *string) Op {
// either src or dst is not a named type.
if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
- return OCONVNOP
+ return OCONVNOP, ""
}
}
@@ -653,7 +648,7 @@ func assignop(src, dst *types.Type, why *string) Op {
TCHAN,
TINTER,
TSLICE:
- return OCONVNOP
+ return OCONVNOP, ""
}
}
@@ -661,26 +656,23 @@ func assignop(src, dst *types.Type, why *string) Op {
// 7. Any typed value can be assigned to the blank identifier.
if dst.Etype == TBLANK {
- return OCONVNOP
+ return OCONVNOP, ""
}
- return OXXX
+ return OXXX, ""
}
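Aside: the signature change trades an optional out-parameter for a second return value, so callers can never observe a stale reason and the zero value "" means "no reason". The shape of the refactoring, reduced to a toy:

package main

import "fmt"

// Old style: the reason travels through an optional out-parameter.
func checkOld(ok bool, why *string) bool {
	if why != nil {
		*why = ""
	}
	if !ok && why != nil {
		*why = ": not assignable"
	}
	return ok
}

// New style: the reason is a second result; "" means no reason.
func checkNew(ok bool) (bool, string) {
	if !ok {
		return false, ": not assignable"
	}
	return true, ""
}

func main() {
	if ok, why := checkNew(false); !ok {
		fmt.Printf("cannot assign%s\n", why)
	}
}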
// Can we convert a value of type src to a value of type dst?
// If so, return op code to use in conversion (maybe OCONVNOP).
-// If not, return OXXX.
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
// srcConstant indicates whether the value of type src is a constant.
-func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
- if why != nil {
- *why = ""
- }
-
+func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
if src == dst {
- return OCONVNOP
+ return OCONVNOP, ""
}
if src == nil || dst == nil {
- return OXXX
+ return OXXX, ""
}
// Conversions from regular to go:notinheap are not allowed
@@ -688,23 +680,19 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// rules.
// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
- if why != nil {
- *why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
- }
- return OXXX
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
+ return OXXX, why
}
// (b) Disallow string to []T where T is go:notinheap.
if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
- if why != nil {
- *why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
- }
- return OXXX
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
+ return OXXX, why
}
// 1. src can be assigned to dst.
- op := assignop(src, dst, why)
+ op, why := assignop(src, dst)
if op != OXXX {
- return op
+ return op, why
}
// The rules for interfaces are no different in conversions
@@ -712,60 +700,57 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// with the good message from assignop.
// Otherwise clear the error.
if src.IsInterface() || dst.IsInterface() {
- return OXXX
- }
- if why != nil {
- *why = ""
+ return OXXX, why
}
// 2. Ignoring struct tags, src and dst have identical underlying types.
if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
- return OCONVNOP
+ return OCONVNOP, ""
}
// 3. src and dst are unnamed pointer types and, ignoring struct tags,
// their base types have identical underlying types.
if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
- return OCONVNOP
+ return OCONVNOP, ""
}
}
// 4. src and dst are both integer or floating point types.
if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
if simtype[src.Etype] == simtype[dst.Etype] {
- return OCONVNOP
+ return OCONVNOP, ""
}
- return OCONV
+ return OCONV, ""
}
// 5. src and dst are both complex types.
if src.IsComplex() && dst.IsComplex() {
if simtype[src.Etype] == simtype[dst.Etype] {
- return OCONVNOP
+ return OCONVNOP, ""
}
- return OCONV
+ return OCONV, ""
}
// Special case for constant conversions: any numeric
// conversion is potentially okay. We'll validate further
// within evconst. See #38117.
if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
- return OCONV
+ return OCONV, ""
}
// 6. src is an integer or has type []byte or []rune
// and dst is a string type.
if src.IsInteger() && dst.IsString() {
- return ORUNESTR
+ return ORUNESTR, ""
}
if src.IsSlice() && dst.IsString() {
if src.Elem().Etype == types.Bytetype.Etype {
- return OBYTES2STR
+ return OBYTES2STR, ""
}
if src.Elem().Etype == types.Runetype.Etype {
- return ORUNES2STR
+ return ORUNES2STR, ""
}
}
@@ -773,21 +758,21 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// String to slice.
if src.IsString() && dst.IsSlice() {
if dst.Elem().Etype == types.Bytetype.Etype {
- return OSTR2BYTES
+ return OSTR2BYTES, ""
}
if dst.Elem().Etype == types.Runetype.Etype {
- return OSTR2RUNES
+ return OSTR2RUNES, ""
}
}
// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
- return OCONVNOP
+ return OCONVNOP, ""
}
// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
- return OCONVNOP
+ return OCONVNOP, ""
}
// src is map and dst is a pointer to corresponding hmap.
@@ -795,10 +780,10 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// go gc maps are implemented as a pointer to a hmap struct.
if src.Etype == TMAP && dst.IsPtr() &&
src.MapType().Hmap == dst.Elem() {
- return OCONVNOP
+ return OCONVNOP, ""
}
- return OXXX
+ return OXXX, ""
}
func assignconv(n *Node, t *types.Type, context string) *Node {
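The hunks above replace assignop's and convertop's *string out parameter with a second return value. A minimal standalone sketch of the same refactoring pattern (function names here are illustrative, not the compiler's):

    package main

    import "fmt"

    // Before: callers pass a *string and the callee must remember to
    // clear it. After: the reason travels as a second return value.
    func checkOld(ok bool, why *string) bool {
        if why != nil {
            *why = ""
        }
        if !ok && why != nil {
            *why = ": not assignable"
        }
        return ok
    }

    func checkNew(ok bool) (bool, string) {
        if !ok {
            return false, ": not assignable"
        }
        return true, ""
    }

    func main() {
        var why string
        checkOld(false, &why)
        fmt.Println(why) // : not assignable
        if _, why := checkNew(false); why != "" {
            fmt.Println(why) // : not assignable
        }
    }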
@@ -825,7 +810,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
// Convert ideal bool from comparison to plain bool
// if the next step is non-bool (like interface{}).
- if n.Type == types.Idealbool && !t.IsBoolean() {
+ if n.Type == types.UntypedBool && !t.IsBoolean() {
if n.Op == ONAME || n.Op == OLITERAL {
r := nod(OCONVNOP, n, nil)
r.Type = types.Types[TBOOL]
@@ -839,8 +824,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
return n
}
- var why string
- op := assignop(n.Type, t, &why)
+ op, why := assignop(n.Type, t)
if op == OXXX {
yyerror("cannot use %L as type %v in %s%s", n, t, context(), why)
op = OCONV
@@ -1040,25 +1024,24 @@ func calcHasCall(n *Node) bool {
return false
}
-func badtype(op Op, tl *types.Type, tr *types.Type) {
- fmt_ := ""
+func badtype(op Op, tl, tr *types.Type) {
+ var s string
if tl != nil {
- fmt_ += fmt.Sprintf("\n\t%v", tl)
+ s += fmt.Sprintf("\n\t%v", tl)
}
if tr != nil {
- fmt_ += fmt.Sprintf("\n\t%v", tr)
+ s += fmt.Sprintf("\n\t%v", tr)
}
// common mistake: *struct and *interface.
if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
- fmt_ += "\n\t(*struct vs *interface)"
+ s += "\n\t(*struct vs *interface)"
} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
- fmt_ += "\n\t(*interface vs *struct)"
+ s += "\n\t(*interface vs *struct)"
}
}
- s := fmt_
yyerror("illegal types for operand: %v%s", op, s)
}
@@ -1523,7 +1506,7 @@ func structargs(tl *types.Type, mustname bool) []*Node {
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
- if false && Debug['r'] != 0 {
+ if false && Debug.r != 0 {
fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
}
@@ -1596,7 +1579,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
fn.Nbody.Append(call)
}
- if false && Debug['r'] != 0 {
+ if false && Debug.r != 0 {
dumplist("genwrapper body", fn.Nbody)
}
@@ -1737,7 +1720,7 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
// the method does not exist for value types.
rcvr := tm.Type.Recv().Type
if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
- if false && Debug['r'] != 0 {
+ if false && Debug.r != 0 {
yyerror("interface pointer mismatch")
}
@@ -1871,8 +1854,10 @@ func isdirectiface(t *types.Type) bool {
}
switch t.Etype {
- case TPTR,
- TCHAN,
+ case TPTR:
+ // Pointers to notinheap types must be stored indirectly. See issue 42076.
+ return !t.Elem().NotInHeap()
+ case TCHAN,
TMAP,
TFUNC,
TUNSAFEPTR:
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
index 138b0acc53..8d9fbe300e 100644
--- a/src/cmd/compile/internal/gc/swt.go
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -189,16 +189,19 @@ func typecheckExprSwitch(n *Node) {
continue
}
- switch {
- case nilonly != "" && !n1.isNil():
+ if nilonly != "" && !n1.isNil() {
yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
- case t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type):
+ } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
- case assignop(n1.Type, t, nil) == 0 && assignop(t, n1.Type, nil) == 0:
- if n.Left != nil {
- yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
- } else {
- yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
+ } else {
+ op1, _ := assignop(n1.Type, t)
+ op2, _ := assignop(t, n1.Type)
+ if op1 == OXXX && op2 == OXXX {
+ if n.Left != nil {
+ yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
+ } else {
+ yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
+ }
}
}
@@ -358,8 +361,8 @@ func (s *exprSwitch) flush() {
// all we need here is consistency. We respect this
// sorting below.
sort.Slice(cc, func(i, j int) bool {
- si := strlit(cc[i].lo)
- sj := strlit(cc[j].lo)
+ si := cc[i].lo.StringVal()
+ sj := cc[j].lo.StringVal()
if len(si) != len(sj) {
return len(si) < len(sj)
}
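The sort above orders string cases by length first and then by value, so runs of equal-length cases are contiguous and can be dispatched with a length check before any value comparisons. A runnable sketch of that ordering, with plain strings standing in for exprClauses:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        cases := []string{"go", "rust", "c", "java", "ml"}
        // Same ordering as exprSwitch.flush: length first, then value.
        sort.Slice(cases, func(i, j int) bool {
            si, sj := cases[i], cases[j]
            if len(si) != len(sj) {
                return len(si) < len(sj)
            }
            return si < sj
        })
        fmt.Println(cases) // [c go ml java rust]
    }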
@@ -368,7 +371,7 @@ func (s *exprSwitch) flush() {
// runLen returns the string length associated with a
// particular run of exprClauses.
- runLen := func(run []exprClause) int64 { return int64(len(strlit(run[0].lo))) }
+ runLen := func(run []exprClause) int64 { return int64(len(run[0].lo.StringVal())) }
// Collapse runs of consecutive strings with the same length.
var runs [][]exprClause
@@ -405,7 +408,7 @@ func (s *exprSwitch) flush() {
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
- if last.jmp == c.jmp && last.hi.Int64()+1 == c.lo.Int64() {
+ if last.jmp == c.jmp && last.hi.Int64Val()+1 == c.lo.Int64Val() {
last.hi = c.lo
} else {
merged = append(merged, c)
@@ -440,7 +443,7 @@ func (c *exprClause) test(exprname *Node) *Node {
// Optimize "switch true { ...}" and "switch false { ... }".
if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() {
- if exprname.Val().U.(bool) {
+ if exprname.BoolVal() {
return c.lo
} else {
return nodl(c.pos, ONOT, c.lo, nil)
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
index 4aa2e230ce..58de9b5e3f 100644
--- a/src/cmd/compile/internal/gc/syntax.go
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -142,7 +142,7 @@ const (
_, _ // second nodeInitorder bit
_, nodeHasBreak
_, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
- _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP; or ANDNOT lowered to OAND
+ _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP
_, nodeIsDDD // is the argument variadic
_, nodeDiag // already printed error about this
_, nodeColas // OAS resulting from :=
@@ -247,7 +247,7 @@ func (n *Node) Val() Val {
// SetVal sets the Val for the node, which must not have been used with SetOpt.
func (n *Node) SetVal(v Val) {
if n.HasOpt() {
- Debug['h'] = 1
+ Debug.h = 1
Dump("have Opt", n)
Fatalf("have Opt")
}
@@ -270,7 +270,7 @@ func (n *Node) SetOpt(x interface{}) {
return
}
if n.HasVal() {
- Debug['h'] = 1
+ Debug.h = 1
Dump("have Val", n)
Fatalf("have Val")
}
@@ -460,14 +460,14 @@ type Param struct {
// x1 := xN.Defn
// x1.Innermost = xN.Outer
//
- // We leave xN.Innermost set so that we can still get to the original
+ // We leave x1.Innermost set so that we can still get to the original
// variable quickly. Not shown here, but once we're
// done parsing a function and no longer need xN.Outer for the
- // lexical x reference links as described above, closurebody
+ // lexical x reference links as described above, funcLit
// recomputes xN.Outer as the semantic x reference link tree,
// even filling in x in intermediate closures that might not
// have mentioned it along the way to inner closures that did.
- // See closurebody for details.
+ // See funcLit for details.
//
// During the eventual compilation, then, for closure variables we have:
//
@@ -480,11 +480,87 @@ type Param struct {
Innermost *Node
Outer *Node
- // OTYPE
- //
- // TODO: Should Func pragmas also be stored on the Name?
- Pragma PragmaFlag
- Alias bool // node is alias for Ntype (only used when type-checking ODCLTYPE)
+ // OTYPE & ONAME //go:embed info,
+ // sharing storage to reduce gc.Param size.
+ // Extra is nil, or else *Extra is a *paramType or an *embedFileList.
+ Extra *interface{}
+}
+
+type paramType struct {
+ flag PragmaFlag
+ alias bool
+}
+
+type embedFileList []string
+
+// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
+func (p *Param) Pragma() PragmaFlag {
+ if p.Extra == nil {
+ return 0
+ }
+ return (*p.Extra).(*paramType).flag
+}
+
+// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
+func (p *Param) SetPragma(flag PragmaFlag) {
+ if p.Extra == nil {
+ if flag == 0 {
+ return
+ }
+ p.Extra = new(interface{})
+ *p.Extra = &paramType{flag: flag}
+ return
+ }
+ (*p.Extra).(*paramType).flag = flag
+}
+
+// Alias reports whether p, which must be for an OTYPE, is a type alias.
+func (p *Param) Alias() bool {
+ if p.Extra == nil {
+ return false
+ }
+ t, ok := (*p.Extra).(*paramType)
+ if !ok {
+ return false
+ }
+ return t.alias
+}
+
+// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
+func (p *Param) SetAlias(alias bool) {
+ if p.Extra == nil {
+ if !alias {
+ return
+ }
+ p.Extra = new(interface{})
+ *p.Extra = &paramType{alias: alias}
+ return
+ }
+ (*p.Extra).(*paramType).alias = alias
+}
+
+// EmbedFiles returns the list of embedded files for p,
+// which must be for an ONAME var.
+func (p *Param) EmbedFiles() []string {
+ if p.Extra == nil {
+ return nil
+ }
+ return *(*p.Extra).(*embedFileList)
+}
+
+// SetEmbedFiles sets the list of embedded files for p,
+// which must be for an ONAME var.
+func (p *Param) SetEmbedFiles(list []string) {
+ if p.Extra == nil {
+ if len(list) == 0 {
+ return
+ }
+ f := embedFileList(list)
+ p.Extra = new(interface{})
+ *p.Extra = &f
+ return
+ }
+ *(*p.Extra).(*embedFileList) = list
}
// Functions
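The point of funneling Pragma, Alias, and the new //go:embed file list through a single Extra pointer is Param's footprint: the common case pays one nil word instead of a field per feature. A rough standalone illustration of the size effect (the field types are stand-ins, not the real gc declarations):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type oldParam struct {
        pragma int64 // stand-in for the removed Pragma field
        alias  bool  // stand-in for the removed Alias field
    }

    type newParam struct {
        extra *interface{} // nil, or *paramType / *embedFileList
    }

    func main() {
        fmt.Println(unsafe.Sizeof(oldParam{})) // 16 on 64-bit
        fmt.Println(unsafe.Sizeof(newParam{})) // 8 on 64-bit
    }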
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 0eb0dae373..8ebeaf1330 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -257,12 +257,12 @@ func typecheck(n *Node, top int) (res *Node) {
// are substituted.
cycle := cycleFor(n)
for _, n1 := range cycle {
- if n1.Name != nil && !n1.Name.Param.Alias {
+ if n1.Name != nil && !n1.Name.Param.Alias() {
// Cycle is ok. But if n is an alias type and doesn't
// have a type yet, we have a recursive type declaration
// with aliases that we can't handle properly yet.
// Report an error rather than crashing later.
- if n.Name != nil && n.Name.Param.Alias && n.Type == nil {
+ if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
lineno = n.Pos
Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
}
@@ -361,7 +361,7 @@ func typecheck1(n *Node, top int) (res *Node) {
ok |= ctxExpr
if n.Type == nil && n.Val().Ctype() == CTSTR {
- n.Type = types.Idealstring
+ n.Type = types.UntypedString
}
case ONONAME:
@@ -623,8 +623,8 @@ func typecheck1(n *Node, top int) (res *Node) {
// no defaultlit for left
// the outer context gives the type
n.Type = l.Type
- if (l.Type == types.Idealfloat || l.Type == types.Idealcomplex) && r.Op == OLITERAL {
- n.Type = types.Idealint
+ if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == OLITERAL {
+ n.Type = types.UntypedInt
}
break
@@ -674,8 +674,8 @@ func typecheck1(n *Node, top int) (res *Node) {
// The conversion allocates, so only do it if the concrete type is huge.
converted := false
if r.Type.Etype != TBLANK {
- aop = assignop(l.Type, r.Type, nil)
- if aop != 0 {
+ aop, _ = assignop(l.Type, r.Type)
+ if aop != OXXX {
if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
n.Type = nil
@@ -696,8 +696,8 @@ func typecheck1(n *Node, top int) (res *Node) {
}
if !converted && l.Type.Etype != TBLANK {
- aop = assignop(r.Type, l.Type, nil)
- if aop != 0 {
+ aop, _ = assignop(r.Type, l.Type)
+ if aop != OXXX {
if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
n.Type = nil
@@ -777,7 +777,7 @@ func typecheck1(n *Node, top int) (res *Node) {
if iscmp[n.Op] {
evconst(n)
- t = types.Idealbool
+ t = types.UntypedBool
if n.Op != OLITERAL {
l, r = defaultlit2(l, r, true)
n.Left = l
@@ -1046,13 +1046,13 @@ func typecheck1(n *Node, top int) (res *Node) {
}
if !n.Bounded() && Isconst(n.Right, CTINT) {
- x := n.Right.Int64()
+ x := n.Right.Int64Val()
if x < 0 {
yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
} else if t.IsArray() && x >= t.NumElem() {
yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem())
- } else if Isconst(n.Left, CTSTR) && x >= int64(len(strlit(n.Left))) {
- yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(strlit(n.Left)))
+ } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.StringVal())) {
+ yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal()))
} else if n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
yyerror("invalid %s index %v (index too large)", why, n.Right)
}
@@ -1148,11 +1148,11 @@ func typecheck1(n *Node, top int) (res *Node) {
l = defaultlit(l, types.Types[TINT])
c = defaultlit(c, types.Types[TINT])
- if Isconst(l, CTINT) && l.Int64() < 0 {
+ if Isconst(l, CTINT) && l.Int64Val() < 0 {
Fatalf("len for OSLICEHEADER must be non-negative")
}
- if Isconst(c, CTINT) && c.Int64() < 0 {
+ if Isconst(c, CTINT) && c.Int64Val() < 0 {
Fatalf("cap for OSLICEHEADER must be non-negative")
}
@@ -1201,7 +1201,7 @@ func typecheck1(n *Node, top int) (res *Node) {
if n.Left.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
Fatalf("len for OMAKESLICECOPY too large")
}
- if n.Left.Int64() < 0 {
+ if n.Left.Int64Val() < 0 {
Fatalf("len for OMAKESLICECOPY must be non-negative")
}
}
@@ -1458,7 +1458,7 @@ func typecheck1(n *Node, top int) (res *Node) {
// Determine result type.
switch t.Etype {
case TIDEAL:
- n.Type = types.Idealfloat
+ n.Type = types.UntypedFloat
case TCOMPLEX64:
n.Type = types.Types[TFLOAT32]
case TCOMPLEX128:
@@ -1504,7 +1504,7 @@ func typecheck1(n *Node, top int) (res *Node) {
return n
case TIDEAL:
- t = types.Idealcomplex
+ t = types.UntypedComplex
case TFLOAT32:
t = types.Types[TCOMPLEX64]
@@ -1691,7 +1691,7 @@ func typecheck1(n *Node, top int) (res *Node) {
return n
}
var why string
- n.Op = convertop(n.Left.Op == OLITERAL, t, n.Type, &why)
+ n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type)
if n.Op == OXXX {
if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
@@ -2187,14 +2187,14 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool {
}
if r.Op == OLITERAL {
- if r.Int64() < 0 {
+ if r.Int64Val() < 0 {
yyerror("invalid slice index %v (index must be non-negative)", r)
return false
- } else if tp != nil && tp.NumElem() >= 0 && r.Int64() > tp.NumElem() {
+ } else if tp != nil && tp.NumElem() >= 0 && r.Int64Val() > tp.NumElem() {
yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
return false
- } else if Isconst(l, CTSTR) && r.Int64() > int64(len(strlit(l))) {
- yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(strlit(l)))
+ } else if Isconst(l, CTSTR) && r.Int64Val() > int64(len(l.StringVal())) {
+ yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal()))
return false
} else if r.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
yyerror("invalid slice index %v (index too large)", r)
@@ -2516,7 +2516,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
n.Left = nod(OADDR, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, ctxType|ctxExpr)
- } else if tt.IsPtr() && !rcvr.IsPtr() && types.Identical(tt.Elem(), rcvr) {
+ } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
n.Left = nod(ODEREF, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, ctxType|ctxExpr)
@@ -2724,9 +2724,9 @@ func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
// e.g in error messages about wrong arguments to return.
func sigrepr(t *types.Type, isddd bool) string {
switch t {
- case types.Idealstring:
+ case types.UntypedString:
return "string"
- case types.Idealbool:
+ case types.UntypedBool:
return "bool"
}
@@ -3267,9 +3267,7 @@ func typecheckas(n *Node) {
}
func checkassignto(src *types.Type, dst *Node) {
- var why string
-
- if assignop(src, dst.Type, &why) == 0 {
+ if op, why := assignop(src, dst.Type); op == OXXX {
yyerror("cannot assign %v to %L in multiple assignment%s", src, dst, why)
return
}
@@ -3450,9 +3448,8 @@ func stringtoruneslit(n *Node) *Node {
}
var l []*Node
- s := strlit(n.Left)
i := 0
- for _, r := range s {
+ for _, r := range n.Left.StringVal() {
l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
i++
}
@@ -3507,7 +3504,7 @@ func setUnderlying(t, underlying *types.Type) {
}
// Propagate go:notinheap pragma from the Name to the Type.
- if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma&NotInHeap != 0 {
+ if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 {
t.SetNotInHeap(true)
}
@@ -3679,7 +3676,7 @@ func typecheckdef(n *Node) {
n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
case OTYPE:
- if p := n.Name.Param; p.Alias {
+ if p := n.Name.Param; p.Alias() {
// Type alias declaration: Simply use the rhs type - no need
// to create a new type.
// If we have a syntax error, p.Ntype may be nil.
@@ -3904,7 +3901,7 @@ func deadcodefn(fn *Node) {
return
}
case OFOR:
- if !Isconst(n.Left, CTBOOL) || n.Left.Bool() {
+ if !Isconst(n.Left, CTBOOL) || n.Left.BoolVal() {
return
}
default:
@@ -3934,7 +3931,7 @@ func deadcodeslice(nn Nodes) {
n.Left = deadcodeexpr(n.Left)
if Isconst(n.Left, CTBOOL) {
var body Nodes
- if n.Left.Bool() {
+ if n.Left.BoolVal() {
n.Rlist = Nodes{}
body = n.Nbody
} else {
@@ -3977,7 +3974,7 @@ func deadcodeexpr(n *Node) *Node {
n.Left = deadcodeexpr(n.Left)
n.Right = deadcodeexpr(n.Right)
if Isconst(n.Left, CTBOOL) {
- if n.Left.Bool() {
+ if n.Left.BoolVal() {
return n.Right // true && x => x
} else {
return n.Left // false && x => false
@@ -3987,7 +3984,7 @@ func deadcodeexpr(n *Node) *Node {
n.Left = deadcodeexpr(n.Left)
n.Right = deadcodeexpr(n.Right)
if Isconst(n.Left, CTBOOL) {
- if n.Left.Bool() {
+ if n.Left.BoolVal() {
return n.Left // true || x => true
} else {
return n.Right // false || x => x
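The two hunks above are the short-circuit folding deadcodeexpr performs once an operand is a boolean constant. A minimal sketch of the same rules on a toy expression type (illustrative, not the compiler's Node):

    package main

    import "fmt"

    type expr struct {
        op          string // "const", "and", "or", or "call"
        val         bool   // meaningful only for "const"
        left, right *expr
    }

    func fold(e *expr) *expr {
        switch e.op {
        case "and":
            e.left, e.right = fold(e.left), fold(e.right)
            if e.left.op == "const" {
                if e.left.val {
                    return e.right // true && x => x
                }
                return e.left // false && x => false
            }
        case "or":
            e.left, e.right = fold(e.left), fold(e.right)
            if e.left.op == "const" {
                if e.left.val {
                    return e.left // true || x => true
                }
                return e.right // false || x => x
            }
        }
        return e
    }

    func main() {
        t := &expr{op: "const", val: true}
        call := &expr{op: "call"}
        fmt.Println(fold(&expr{op: "and", left: t, right: call}).op) // call
        fmt.Println(fold(&expr{op: "or", left: t, right: call}).op)  // const
    }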
diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go
index 04861c8dd4..ff8cabd8e3 100644
--- a/src/cmd/compile/internal/gc/universe.go
+++ b/src/cmd/compile/internal/gc/universe.go
@@ -123,21 +123,21 @@ func lexinit() {
asNode(s2.Def).SetSubOp(s.op)
}
- types.Idealstring = types.New(TSTRING)
- types.Idealbool = types.New(TBOOL)
+ types.UntypedString = types.New(TSTRING)
+ types.UntypedBool = types.New(TBOOL)
types.Types[TANY] = types.New(TANY)
s := builtinpkg.Lookup("true")
s.Def = asTypesNode(nodbool(true))
asNode(s.Def).Sym = lookup("true")
asNode(s.Def).Name = new(Name)
- asNode(s.Def).Type = types.Idealbool
+ asNode(s.Def).Type = types.UntypedBool
s = builtinpkg.Lookup("false")
s.Def = asTypesNode(nodbool(false))
asNode(s.Def).Sym = lookup("false")
asNode(s.Def).Name = new(Name)
- asNode(s.Def).Type = types.Idealbool
+ asNode(s.Def).Type = types.UntypedBool
s = lookup("_")
s.Block = -100
@@ -351,7 +351,7 @@ func typeinit() {
sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
dowidth(types.Types[TSTRING])
- dowidth(types.Idealstring)
+ dowidth(types.UntypedString)
}
func makeErrorInterface() *types.Type {
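The Idealbool/Idealstring to UntypedBool/UntypedString rename matches the spec's terminology: a comparison yields an untyped boolean value, which only acquires a concrete type on assignment. For example:

    package main

    import "fmt"

    type myBool bool

    func main() {
        const c = 1 < 2  // an untyped boolean constant
        var b myBool = c // assignable without conversion because c is untyped
        fmt.Println(b)   // true
    }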
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 8e45059eab..b453e9f1d9 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -21,7 +21,7 @@ const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
func walk(fn *Node) {
Curfn = fn
- if Debug['W'] != 0 {
+ if Debug.W != 0 {
s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Nbody)
}
@@ -63,14 +63,14 @@ func walk(fn *Node) {
return
}
walkstmtlist(Curfn.Nbody.Slice())
- if Debug['W'] != 0 {
+ if Debug.W != 0 {
s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Nbody)
}
zeroResults()
heapmoves()
- if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 {
+ if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 {
s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Func.Enter)
}
@@ -336,19 +336,6 @@ func walkstmt(n *Node) *Node {
return n
}
-func isSmallMakeSlice(n *Node) bool {
- if n.Op != OMAKESLICE {
- return false
- }
- r := n.Right
- if r == nil {
- r = n.Left
- }
- t := n.Type
-
- return smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < maxImplicitStackVarSize/t.Elem().Width)
-}
-
// walk the whole tree of the body of an
// expression or simple statement.
// the types of the expressions are calculated.
@@ -449,7 +436,7 @@ func walkexpr(n *Node, init *Nodes) *Node {
lno := setlineno(n)
- if Debug['w'] > 1 {
+ if Debug.w > 1 {
Dump("before walk expr", n)
}
@@ -487,7 +474,7 @@ opswitch:
ODEREF, OSPTR, OITAB, OIDATA, OADDR:
n.Left = walkexpr(n.Left, init)
- case OEFACE, OAND, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
+ case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@@ -978,14 +965,6 @@ opswitch:
fn := basicnames[param] + "to" + basicnames[result]
n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
- case OANDNOT:
- n.Left = walkexpr(n.Left, init)
- n.Op = OAND
- n.SetImplicit(true) // for walkCheckPtrArithmetic
- n.Right = nod(OBITNOT, n.Right, nil)
- n.Right = typecheck(n.Right, ctxExpr)
- n.Right = walkexpr(n.Right, init)
-
case ODIV, OMOD:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
@@ -1014,7 +993,7 @@ opswitch:
// The SSA backend will handle those.
switch et {
case TINT64:
- c := n.Right.Int64()
+ c := n.Right.Int64Val()
if c < 0 {
c = -c
}
@@ -1022,7 +1001,7 @@ opswitch:
break opswitch
}
case TUINT64:
- c := uint64(n.Right.Int64())
+ c := uint64(n.Right.Int64Val())
if c != 0 && c&(c-1) == 0 {
break opswitch
}
@@ -1062,15 +1041,15 @@ opswitch:
}
if t.IsArray() {
n.SetBounded(bounded(r, t.NumElem()))
- if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
+ if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
if smallintconst(n.Right) && !n.Bounded() {
yyerror("index out of bounds")
}
} else if Isconst(n.Left, CTSTR) {
- n.SetBounded(bounded(r, int64(len(strlit(n.Left)))))
- if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
+ n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
+ if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
if smallintconst(n.Right) && !n.Bounded() {
@@ -1339,8 +1318,8 @@ opswitch:
yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
if n.Esc == EscNone {
- if !isSmallMakeSlice(n) {
- Fatalf("non-small OMAKESLICE with EscNone: %v", n)
+ if why := heapAllocReason(n); why != "" {
+ Fatalf("%v has EscNone, but %v", n, why)
}
// var arr [r]T
// n = arr[:l]
@@ -1504,7 +1483,7 @@ opswitch:
case OSTR2BYTES:
s := n.Left
if Isconst(s, CTSTR) {
- sc := strlit(s)
+ sc := s.StringVal()
// Allocate a [n]byte of the right size.
t := types.NewArray(types.Types[TUINT8], int64(len(sc)))
@@ -1612,7 +1591,7 @@ opswitch:
updateHasCall(n)
- if Debug['w'] != 0 && n != nil {
+ if Debug.w != 0 && n != nil {
Dump("after walk expr", n)
}
@@ -1932,7 +1911,7 @@ func walkprint(nn *Node, init *Nodes) *Node {
for i := 0; i < len(s); {
var strs []string
for i < len(s) && Isconst(s[i], CTSTR) {
- strs = append(strs, strlit(s[i]))
+ strs = append(strs, s[i].StringVal())
i++
}
if len(strs) > 0 {
@@ -1978,7 +1957,17 @@ func walkprint(nn *Node, init *Nodes) *Node {
on = syslook("printiface")
}
on = substArgTypes(on, n.Type) // any-1
- case TPTR, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
+ case TPTR:
+ if n.Type.Elem().NotInHeap() {
+ on = syslook("printuintptr")
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TUNSAFEPTR]
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TUINTPTR]
+ break
+ }
+ fallthrough
+ case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
on = syslook("printpointer")
on = substArgTypes(on, n.Type) // any-1
case TSLICE:
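The new TPTR case prints a pointer-to-notinheap value as an integer rather than handing it to printpointer, keeping not-in-heap addresses out of pointer-typed argument slots. The inserted double conversion is the one a user would write by hand; a sketch with an ordinary type standing in for a go:notinheap one:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type T struct{ x int } // stand-in; real go:notinheap types live in the runtime

    func main() {
        p := &T{}
        fmt.Println(uintptr(unsafe.Pointer(p))) // what printuintptr receives
    }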
@@ -2001,7 +1990,7 @@ func walkprint(nn *Node, init *Nodes) *Node {
case TSTRING:
cs := ""
if Isconst(n, CTSTR) {
- cs = strlit(n)
+ cs = n.StringVal()
}
switch cs {
case " ":
@@ -2170,7 +2159,7 @@ func reorder3(all []*Node) []*Node {
// The result of reorder3save MUST be assigned back to n, e.g.
// n.Left = reorder3save(n.Left, all, i, early)
func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
- if !aliased(n, all, i) {
+ if !aliased(n, all[:i]) {
return n
}
@@ -2202,73 +2191,75 @@ func outervalue(n *Node) *Node {
}
}
-// Is it possible that the computation of n might be
-// affected by writes in as up to but not including the ith element?
-func aliased(n *Node, all []*Node, i int) bool {
- if n == nil {
+// Is it possible that the computation of r might be
+// affected by assignments in all?
+func aliased(r *Node, all []*Node) bool {
+ if r == nil {
return false
}
// Treat all fields of a struct as referring to the whole struct.
// We could do better but we would have to keep track of the fields.
- for n.Op == ODOT {
- n = n.Left
+ for r.Op == ODOT {
+ r = r.Left
}
// Look for obvious aliasing: a variable being assigned
// during the all list and appearing in r.
- // Also record whether there are any writes to main memory.
- // Also record whether there are any writes to variables
- // whose addresses have been taken.
+ // Also record whether there are any writes to addressable
+ // memory (either main memory or variables whose addresses
+ // have been taken).
memwrite := false
- varwrite := false
- for _, an := range all[:i] {
- a := outervalue(an.Left)
-
- for a.Op == ODOT {
- a = a.Left
+ for _, as := range all {
+ // We can ignore assignments to blank.
+ if as.Left.isBlank() {
+ continue
}
- if a.Op != ONAME {
+ l := outervalue(as.Left)
+ if l.Op != ONAME {
memwrite = true
continue
}
- switch n.Class() {
+ switch l.Class() {
default:
- varwrite = true
+ Fatalf("unexpected class: %v, %v", l, l.Class())
+
+ case PAUTOHEAP, PEXTERN:
+ memwrite = true
continue
case PAUTO, PPARAM, PPARAMOUT:
- if n.Name.Addrtaken() {
- varwrite = true
+ if l.Name.Addrtaken() {
+ memwrite = true
continue
}
- if vmatch2(a, n) {
- // Direct hit.
+ if vmatch2(l, r) {
+ // Direct hit: l appears in r.
return true
}
}
}
- // The variables being written do not appear in n.
- // However, n might refer to computed addresses
+ // The variables being written do not appear in r.
+ // However, r might refer to computed addresses
// that are being written.
// If no computed addresses are affected by the writes, no aliasing.
- if !memwrite && !varwrite {
+ if !memwrite {
return false
}
- // If n does not refer to computed addresses
- // (that is, if n only refers to variables whose addresses
+ // If r does not refer to computed addresses
+ // (that is, if r only refers to variables whose addresses
// have not been taken), no aliasing.
- if varexpr(n) {
+ if varexpr(r) {
return false
}
- // Otherwise, both the writes and n refer to computed memory addresses.
+ // Otherwise, both the writes and r refer to computed memory addresses.
// Assume that they might conflict.
return true
}
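What aliased guards, at the language level: in a multiple assignment, index expressions and right-hand sides are evaluated before any assignment is carried out, so a left-hand side that aliases a later read must be saved first. The spec's own style of example:

    package main

    import "fmt"

    func main() {
        x := []int{1, 2}
        i := 0
        i, x[i] = 1, 9    // x[i] is evaluated with the old i, so x[0] = 9
        fmt.Println(i, x) // 1 [9 2]
    }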
@@ -2656,7 +2647,7 @@ func addstr(n *Node, init *Nodes) *Node {
sz := int64(0)
for _, n1 := range n.List.Slice() {
if n1.Op == OLITERAL {
- sz += int64(len(strlit(n1)))
+ sz += int64(len(n1.StringVal()))
}
}
@@ -2830,7 +2821,7 @@ func appendslice(n *Node, init *Nodes) *Node {
// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
func isAppendOfMake(n *Node) bool {
- if Debug['N'] != 0 || instrumenting {
+ if Debug.N != 0 || instrumenting {
return false
}
@@ -3450,7 +3441,7 @@ func walkcompare(n *Node, init *Nodes) *Node {
func tracecmpArg(n *Node, t *types.Type, init *Nodes) *Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
- if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64() < 0 {
+ if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 {
n = copyexpr(n, n.Type, init)
}
@@ -3520,7 +3511,7 @@ func walkcompareString(n *Node, init *Nodes) *Node {
// Length-only checks are ok, though.
maxRewriteLen = 0
}
- if s := strlit(cs); len(s) <= maxRewriteLen {
+ if s := cs.StringVal(); len(s) <= maxRewriteLen {
if len(s) > 0 {
ncs = safeexpr(ncs, init)
}
@@ -3615,26 +3606,32 @@ func bounded(n *Node, max int64) bool {
bits := int32(8 * n.Type.Width)
if smallintconst(n) {
- v := n.Int64()
+ v := n.Int64Val()
return 0 <= v && v < max
}
switch n.Op {
- case OAND:
+ case OAND, OANDNOT:
v := int64(-1)
- if smallintconst(n.Left) {
- v = n.Left.Int64()
- } else if smallintconst(n.Right) {
- v = n.Right.Int64()
+ switch {
+ case smallintconst(n.Left):
+ v = n.Left.Int64Val()
+ case smallintconst(n.Right):
+ v = n.Right.Int64Val()
+ if n.Op == OANDNOT {
+ v = ^v
+ if !sign {
+ v &= 1<<uint(bits) - 1
+ }
+ }
}
-
if 0 <= v && v < max {
return true
}
case OMOD:
if !sign && smallintconst(n.Right) {
- v := n.Right.Int64()
+ v := n.Right.Int64Val()
if 0 <= v && v <= max {
return true
}
@@ -3642,7 +3639,7 @@ func bounded(n *Node, max int64) bool {
case ODIV:
if !sign && smallintconst(n.Right) {
- v := n.Right.Int64()
+ v := n.Right.Int64Val()
for bits > 0 && v >= 2 {
bits--
v >>= 1
@@ -3651,7 +3648,7 @@ func bounded(n *Node, max int64) bool {
case ORSH:
if !sign && smallintconst(n.Right) {
- v := n.Right.Int64()
+ v := n.Right.Int64Val()
if v > int64(bits) {
return true
}
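The new OANDNOT arm of bounded above relies on the fact that, for unsigned x, x &^ c never exceeds ^c truncated to the operand width; that is exactly the mask the hunk computes. A brute-force check of the claim for uint8:

    package main

    import "fmt"

    func main() {
        var c uint8 = 0xF0
        mask := ^c // 0x0f: x &^ 0xf0 can only keep the low nibble
        for x := 0; x < 256; x++ {
            if v := uint8(x) &^ c; v > mask {
                panic("bound violated")
            }
        }
        fmt.Printf("x &^ %#x <= %#x for all uint8 x\n", c, mask)
    }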
@@ -3892,6 +3889,16 @@ func wrapCall(n *Node, init *Nodes) *Node {
}
isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
+
+ // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
+ if !isBuiltinCall && n.IsDDD() {
+ last := n.List.Len() - 1
+ if va := n.List.Index(last); va.Op == OSLICELIT {
+ n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...))
+ n.SetIsDDD(false)
+ }
+ }
+
// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
origArgs := make([]*Node, n.List.Len())
t := nod(OTFUNC, nil, nil)
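The typechecker packs the arguments of a call to a variadic function into a slice literal marked IsDDD; the new hunk unpacks that literal again so the generated wrapper receives the original scalar arguments. At the source level the two forms are the same call:

    package main

    import "fmt"

    func f(xs ...int) { fmt.Println(xs) }

    func main() {
        f(1, 2, 3)           // internally f([]int{1, 2, 3}...)
        f([]int{1, 2, 3}...) // the form wrapCall now rewrites back
    }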
@@ -3977,7 +3984,7 @@ func canMergeLoads() bool {
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
func isRuneCount(n *Node) bool {
- return Debug['N'] == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
+ return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
}
func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
@@ -4046,12 +4053,8 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
case OADD:
walk(n.Left)
walk(n.Right)
- case OSUB:
+ case OSUB, OANDNOT:
walk(n.Left)
- case OAND:
- if n.Implicit() { // was OANDNOT
- walk(n.Left)
- }
case OCONVNOP:
if n.Left.Type.IsUnsafePtr() {
n.Left = cheapexpr(n.Left, init)
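With &^ no longer lowered to OAND of a complement, walkCheckPtrArithmetic treats OANDNOT like OSUB: only the left operand can carry the pointer. That matters because &^ is the idiomatic way to round a pointer down to an alignment boundary, as in this sketch (valid under checkptr as long as the result stays inside the allocation):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        buf := make([]byte, 64)
        p := unsafe.Pointer(&buf[9])
        aligned := unsafe.Pointer(uintptr(p) &^ 7) // round down to 8 bytes
        fmt.Println(uintptr(aligned)%8 == 0)       // true
    }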